Nov 25 12:27:33 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 12:27:33 crc restorecon[4579]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 12:27:33 crc restorecon[4579]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc 
restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 12:27:33 crc restorecon[4579]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 12:27:33 crc 
restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:33 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc 
restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 12:27:34 crc restorecon[4579]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 12:27:35 crc kubenswrapper[4675]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.298869 4675 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305531 4675 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305567 4675 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305578 4675 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305588 4675 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305597 4675 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305605 4675 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305617 4675 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305628 4675 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305641 4675 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305674 4675 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305694 4675 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305704 4675 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305715 4675 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305724 4675 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305734 4675 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305743 4675 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305752 4675 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305760 4675 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305769 4675 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305776 4675 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305784 4675 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305794 4675 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305810 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305865 4675 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305875 4675 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305886 4675 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305896 4675 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305905 4675 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305913 4675 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305921 4675 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305929 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305938 4675 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305946 4675 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305958 4675 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305968 4675 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305977 4675 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305985 4675 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.305994 4675 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306002 4675 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306013 4675 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306021 4675 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306029 4675 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306036 4675 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306046 4675 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306055 4675 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306062 4675 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306070 4675 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306078 4675 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306085 4675 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306093 4675 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306101 4675 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306108 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306116 4675 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306126 4675 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
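[editor's note] The feature_gate.go:330 warnings above (and the repeats later in this capture) name OpenShift-specific gates that the upstream kubelet does not recognize; the same set is logged in several bursts during startup. A minimal sketch for collapsing them into one sorted, de-duplicated list; the path "kubelet.log" is a placeholder for wherever this capture lives.

```python
#!/usr/bin/env python3
"""Sketch: de-duplicate the 'unrecognized feature gate' warnings
scattered through this startup log."""
import re

PATTERN = re.compile(r"unrecognized feature gate: (\S+)")

def unknown_gates(path: str) -> list[str]:
    gates: set[str] = set()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            gates.update(PATTERN.findall(line))
    return sorted(gates)

if __name__ == "__main__":
    for gate in unknown_gates("kubelet.log"):  # placeholder path
        print(gate)
```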
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306135 4675 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306144 4675 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306152 4675 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306161 4675 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306169 4675 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306177 4675 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306186 4675 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306195 4675 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306202 4675 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306211 4675 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306220 4675 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306231 4675 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306240 4675 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306248 4675 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306256 4675 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306264 4675 feature_gate.go:330] unrecognized feature gate: Example Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.306272 4675 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307360 4675 flags.go:64] FLAG: --address="0.0.0.0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307388 4675 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307403 4675 flags.go:64] FLAG: --anonymous-auth="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307414 4675 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307425 4675 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307435 4675 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307446 4675 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307457 4675 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307466 4675 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307475 4675 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 25 
12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307485 4675 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307494 4675 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307504 4675 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307513 4675 flags.go:64] FLAG: --cgroup-root="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307522 4675 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307531 4675 flags.go:64] FLAG: --client-ca-file="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307540 4675 flags.go:64] FLAG: --cloud-config="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307549 4675 flags.go:64] FLAG: --cloud-provider="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307558 4675 flags.go:64] FLAG: --cluster-dns="[]" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307572 4675 flags.go:64] FLAG: --cluster-domain="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307581 4675 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307592 4675 flags.go:64] FLAG: --config-dir="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307602 4675 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307611 4675 flags.go:64] FLAG: --container-log-max-files="5" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307622 4675 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307631 4675 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307641 4675 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307650 4675 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307658 4675 flags.go:64] FLAG: --contention-profiling="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307668 4675 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307677 4675 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307687 4675 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307696 4675 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307707 4675 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307716 4675 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307725 4675 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307734 4675 flags.go:64] FLAG: --enable-load-reader="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307742 4675 flags.go:64] FLAG: --enable-server="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307751 4675 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307762 4675 flags.go:64] FLAG: --event-burst="100" Nov 25 12:27:35 crc 
kubenswrapper[4675]: I1125 12:27:35.307771 4675 flags.go:64] FLAG: --event-qps="50" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307780 4675 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307789 4675 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307798 4675 flags.go:64] FLAG: --eviction-hard="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307809 4675 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307849 4675 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307859 4675 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307871 4675 flags.go:64] FLAG: --eviction-soft="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307880 4675 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307889 4675 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307898 4675 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307906 4675 flags.go:64] FLAG: --experimental-mounter-path="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307917 4675 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307926 4675 flags.go:64] FLAG: --fail-swap-on="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307934 4675 flags.go:64] FLAG: --feature-gates="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307945 4675 flags.go:64] FLAG: --file-check-frequency="20s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307954 4675 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307964 4675 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307973 4675 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307983 4675 flags.go:64] FLAG: --healthz-port="10248" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.307992 4675 flags.go:64] FLAG: --help="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308001 4675 flags.go:64] FLAG: --hostname-override="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308010 4675 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308019 4675 flags.go:64] FLAG: --http-check-frequency="20s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308028 4675 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308037 4675 flags.go:64] FLAG: --image-credential-provider-config="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308046 4675 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308055 4675 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308064 4675 flags.go:64] FLAG: --image-service-endpoint="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308073 4675 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 
12:27:35.308081 4675 flags.go:64] FLAG: --kube-api-burst="100" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308090 4675 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308099 4675 flags.go:64] FLAG: --kube-api-qps="50" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308109 4675 flags.go:64] FLAG: --kube-reserved="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308117 4675 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308127 4675 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308136 4675 flags.go:64] FLAG: --kubelet-cgroups="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308145 4675 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308154 4675 flags.go:64] FLAG: --lock-file="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308163 4675 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308173 4675 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308182 4675 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308195 4675 flags.go:64] FLAG: --log-json-split-stream="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308204 4675 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308213 4675 flags.go:64] FLAG: --log-text-split-stream="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308222 4675 flags.go:64] FLAG: --logging-format="text" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308231 4675 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308240 4675 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308249 4675 flags.go:64] FLAG: --manifest-url="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308258 4675 flags.go:64] FLAG: --manifest-url-header="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308269 4675 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308278 4675 flags.go:64] FLAG: --max-open-files="1000000" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308289 4675 flags.go:64] FLAG: --max-pods="110" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308297 4675 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308307 4675 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308316 4675 flags.go:64] FLAG: --memory-manager-policy="None" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308324 4675 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308333 4675 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308342 4675 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308352 4675 flags.go:64] FLAG: 
--node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308370 4675 flags.go:64] FLAG: --node-status-max-images="50" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308381 4675 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308390 4675 flags.go:64] FLAG: --oom-score-adj="-999" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308399 4675 flags.go:64] FLAG: --pod-cidr="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308408 4675 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308420 4675 flags.go:64] FLAG: --pod-manifest-path="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308429 4675 flags.go:64] FLAG: --pod-max-pids="-1" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308438 4675 flags.go:64] FLAG: --pods-per-core="0" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308447 4675 flags.go:64] FLAG: --port="10250" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308456 4675 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308497 4675 flags.go:64] FLAG: --provider-id="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308508 4675 flags.go:64] FLAG: --qos-reserved="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308517 4675 flags.go:64] FLAG: --read-only-port="10255" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308527 4675 flags.go:64] FLAG: --register-node="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308537 4675 flags.go:64] FLAG: --register-schedulable="true" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308545 4675 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308560 4675 flags.go:64] FLAG: --registry-burst="10" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308569 4675 flags.go:64] FLAG: --registry-qps="5" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308577 4675 flags.go:64] FLAG: --reserved-cpus="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308586 4675 flags.go:64] FLAG: --reserved-memory="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308597 4675 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308606 4675 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308615 4675 flags.go:64] FLAG: --rotate-certificates="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308624 4675 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308634 4675 flags.go:64] FLAG: --runonce="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308642 4675 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308651 4675 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308661 4675 flags.go:64] FLAG: --seccomp-default="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308669 4675 flags.go:64] FLAG: --serialize-image-pulls="true" 
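[editor's note] The flags.go:64 lines record every effective command-line flag at startup; the dump continues just below. A small sketch for pulling those name/value pairs into a dict, e.g. to diff the effective flags between two boots. Again "kubelet.log" is a placeholder path, and the regex is tailored to the exact format seen in this capture.

```python
#!/usr/bin/env python3
"""Sketch: extract 'flags.go:64] FLAG: --name="value"' entries from a
kubelet startup log into a dict keyed by flag name."""
import re

FLAG_RE = re.compile(r'flags\.go:64\] FLAG: --([\w-]+)="([^"]*)"')

def effective_flags(path: str) -> dict[str, str]:
    flags: dict[str, str] = {}
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for name, value in FLAG_RE.findall(line):
                flags[name] = value  # later occurrences win
    return flags

if __name__ == "__main__":
    for name, value in sorted(effective_flags("kubelet.log").items()):
        print(f"--{name} = {value!r}")
```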
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308678 4675 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308687 4675 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308696 4675 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308705 4675 flags.go:64] FLAG: --storage-driver-password="root" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308714 4675 flags.go:64] FLAG: --storage-driver-secure="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308723 4675 flags.go:64] FLAG: --storage-driver-table="stats" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308732 4675 flags.go:64] FLAG: --storage-driver-user="root" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308741 4675 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308751 4675 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308760 4675 flags.go:64] FLAG: --system-cgroups="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308769 4675 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308783 4675 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308792 4675 flags.go:64] FLAG: --tls-cert-file="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308801 4675 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308812 4675 flags.go:64] FLAG: --tls-min-version="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308844 4675 flags.go:64] FLAG: --tls-private-key-file="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308853 4675 flags.go:64] FLAG: --topology-manager-policy="none" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308862 4675 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308871 4675 flags.go:64] FLAG: --topology-manager-scope="container" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308880 4675 flags.go:64] FLAG: --v="2" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308892 4675 flags.go:64] FLAG: --version="false" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308903 4675 flags.go:64] FLAG: --vmodule="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308913 4675 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.308922 4675 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309122 4675 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309134 4675 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309144 4675 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309153 4675 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309162 4675 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 
12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309170 4675 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309743 4675 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309754 4675 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.309764 4675 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.309776 4675 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.325611 4675 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.325680 4675 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325809 4675 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325830 4675 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325837 4675 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325844 4675 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325849 4675 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325855 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325861 4675 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325892 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325900 4675 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325908 4675 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325915 4675 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325922 4675 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325929 4675 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.325938 4675 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
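[editor's note] The feature_gate.go:386 lines print the resolved gate set in Go's map syntax ("feature gates: {map[Name:true ...]}"); the same map is logged identically each time it is recomputed during startup. A sketch for turning that dump into a Python dict of booleans, assuming gate names never contain spaces or colons, which holds for the dumps in this log.

```python
#!/usr/bin/env python3
"""Sketch: parse the Go-style map dump from feature_gate.go:386 into a
Python dict mapping gate name -> enabled."""
import re

MAP_RE = re.compile(r"feature gates: \{map\[([^\]]*)\]\}")

def parse_gates(line: str) -> dict[str, bool]:
    m = MAP_RE.search(line)
    if not m:
        return {}
    pairs = (item.split(":", 1) for item in m.group(1).split())
    return {name: value == "true" for name, value in pairs}

# abbreviated sample taken from the dump in this log
sample = ("feature gates: {map[CloudDualStackNodeIPs:true "
          "DisableKubeletCloudCredentialProviders:true KMSv1:true "
          "NodeSwap:false ValidatingAdmissionPolicy:true]}")
print(parse_gates(sample))
```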
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326304 4675 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326310 4675 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326316 4675 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326321 4675 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326327 4675 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326332 4675 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326336 4675 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326341 4675 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326347 4675 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326372 4675 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.326384 4675 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326624 4675 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326637 4675 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326643 4675 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326648 4675 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326654 4675 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326659 4675 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326664 4675 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326670 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326675 4675 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326699 4675 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.326705 4675 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 12:27:35 crc 
kubenswrapper[4675]: W1125 12:27:35.326710 4675 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.327200 4675 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.327207 4675 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.327215 4675 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.327484 4675 server.go:940] "Client rotation is on, will bootstrap in background" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.332327 4675 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.332437 4675 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.333892 4675 server.go:997] "Starting client certificate rotation" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.333942 4675 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.335215 4675 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-27 00:08:59.382889545 +0000 UTC Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.335320 4675 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 755h41m24.047574332s for next certificate rotation Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.356977 4675 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.361137 4675 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.379433 4675 log.go:25] "Validated CRI v1 runtime API" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.419186 4675 log.go:25] "Validated CRI v1 image API" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.421276 4675 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.429436 4675 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-12-21-55-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.429475 4675 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp 
major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.441154 4675 manager.go:217] Machine: {Timestamp:2025-11-25 12:27:35.439704165 +0000 UTC m=+0.611296526 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199472640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:04be46d1-9e64-47f5-b017-0d678111e234 BootID:7da5d77d-65e7-4977-b3b0-0de1398892b4 Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039894528 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599738368 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:3076107 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599734272 Type:vfs Inodes:3076107 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:43:b3:cf Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:43:b3:cf Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:46:42:e6 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f3:22:fc Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:c3:91:f5 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d6:0b:da Speed:-1 Mtu:1496} {Name:eth10 MacAddress:2e:e5:8f:f7:22:a8 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:e2:dd:cc:46:9b:de Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199472640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction 
Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.441324 4675 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.441494 4675 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.443018 4675 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.443178 4675 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.443208 4675 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.443373 4675 topology_manager.go:138] "Creating topology manager with none policy" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.443384 4675 container_manager_linux.go:303] "Creating device plugin manager" Nov 25 12:27:35 
crc kubenswrapper[4675]: I1125 12:27:35.444071 4675 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.444103 4675 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.444251 4675 state_mem.go:36] "Initialized new in-memory state store" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.444785 4675 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.448037 4675 kubelet.go:418] "Attempting to sync node with API server" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.448068 4675 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.448097 4675 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.448112 4675 kubelet.go:324] "Adding apiserver pod source" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.448137 4675 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.454466 4675 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.455804 4675 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.457368 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.457428 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.457357 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.457483 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.457870 4675 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459594 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459623 4675 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/empty-dir" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459645 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459654 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459668 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459677 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459685 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459699 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459710 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459719 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459747 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.459756 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.460893 4675 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.463759 4675 server.go:1280] "Started kubelet" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.463931 4675 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.464404 4675 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.464788 4675 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.464940 4675 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 25 12:27:35 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.467536 4675 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.467573 4675 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.467868 4675 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 17:56:10.276009683 +0000 UTC
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.468023 4675 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 797h28m34.80798989s for next certificate rotation
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.468135 4675 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.468175 4675 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.468256 4675 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.469364 4675 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.469402 4675 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.470180 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.472481 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.472710 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="200ms"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474003 4675 factory.go:153] Registering CRI-O factory
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474036 4675 factory.go:221] Registration of the crio container factory successfully
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474122 4675 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474135 4675 factory.go:55] Registering systemd factory
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474146 4675 factory.go:221] Registration of the systemd container factory successfully
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474170 4675 factory.go:103] Registering Raw factory
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474190 4675 manager.go:1196] Started watching for new ooms in manager
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.474802 4675 manager.go:319] Starting recovery of all containers
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475337 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475389 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475409 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475419 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475427 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475436 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475463 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475473 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475484 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475493 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475502 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475511 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475520 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475533 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475544 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475554 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475563 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475572 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475581 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475591 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475599 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475608 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475617 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475626 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475635 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475646 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475657 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475667 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475676 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475689 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475698 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475706 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475716 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475725 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475734 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475742 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475751 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475762 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475771 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475781 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475791 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475799 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475808 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475832 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475842 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475852 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475861 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475870 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475878 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475887 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475895 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475903 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475917 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475926 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475935 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475945 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475953 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475962 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475970 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475980 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475989 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.475997 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476005 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476016 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476035 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476050 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476061 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476073 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476084 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476095 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476111 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476125 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476134 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476144 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476154 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476163 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476172 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476181 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476191 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476200 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476209 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476218 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476229 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476237 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476255 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476263 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476273 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476283 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476293 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476307 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476326 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476338 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476347 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476356 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476368 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476380 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476392 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476401 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476410 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476423 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476468 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476488 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476500 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476515 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476534 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476546 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476559 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476605 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476615 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476631 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476654 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476663 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476674 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476682 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476691 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476699 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476707 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476714 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476722 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476732 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476739 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476747 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476756 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476763 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476771 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476779 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476788 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476796 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476804 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476813 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476850 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476860 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476868 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476878 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476887 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476896 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476905 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476913 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476923 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476931 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476939 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476947 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476957 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783"
volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476965 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476974 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476982 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.476990 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477002 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477034 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477046 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477058 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477071 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477079 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477088 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477097 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477108 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477128 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477143 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477155 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477166 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477179 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477188 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477196 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477205 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477213 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477221 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477231 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477240 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477249 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477257 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477266 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477275 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.477284 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.475189 4675 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.9:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b3fa4a78a5670 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 12:27:35.463728752 +0000 UTC m=+0.635321093,LastTimestamp:2025-11-25 12:27:35.463728752 +0000 UTC m=+0.635321093,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482063 4675 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482507 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482539 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482575 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482591 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482609 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482622 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482635 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482652 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482664 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482677 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482696 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482711 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482740 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482755 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482767 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482784 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482796 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482826 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482839 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482852 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482869 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482881 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482898 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482912 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482928 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482945 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482959 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482975 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.482988 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483001 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483017 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483033 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483050 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483066 4675 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483076 4675 reconstruct.go:97] "Volume reconstruction finished"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.483085 4675 reconciler.go:26] "Reconciler: start to sync state"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.496377 4675 manager.go:324] Recovery completed
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.505037 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.506926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.507009 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.507079 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.508661 4675 cpu_manager.go:225] "Starting CPU manager" policy="none"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.508681 4675 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.508701 4675 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.525437 4675 policy_none.go:49] "None policy: Start"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.527029 4675 memory_manager.go:170] "Starting memorymanager" policy="None"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.527067 4675 state_mem.go:35] "Initializing new in-memory state store"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.528745 4675 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.530518 4675 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.531022 4675 status_manager.go:217] "Starting to sync pod status with apiserver"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.531074 4675 kubelet.go:2335] "Starting kubelet main sync loop"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.531145 4675 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Nov 25 12:27:35 crc kubenswrapper[4675]: W1125 12:27:35.532070 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.532128 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.568861 4675 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.581979 4675 manager.go:334] "Starting Device Plugin manager"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582029 4675 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582041 4675 server.go:79] "Starting device plugin registration server"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582417 4675 eviction_manager.go:189] "Eviction manager: starting control loop"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582433 4675 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582561 4675 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582646 4675 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.582660 4675 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.592754 4675 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.631924 4675 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.632017 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633012 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633058 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633073 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633282 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633455 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.633499 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.634444 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.634476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.634498 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.634700 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.634921 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.635068 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.635090 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.635095 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.635187 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636285 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636306 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636315 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636438 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636571 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.636618 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637131 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637156 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637707 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637748 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637761 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.637963 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.638106 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.638136 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.638230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.638270 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.638286 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639187 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639295 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639311 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639191 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639500 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.639535 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.640870 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.640894 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.640902 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.673795 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="400ms"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.683732 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685455 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685481 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685527 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685547 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685563 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685592 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685607 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685622 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685637 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685651 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685665 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685692 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685715 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685733 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.685751 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.686078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.686127 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.686167 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.686248 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.687260 4675 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.9:6443: connect: connection refused" node="crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786834 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786894 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786920 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786942 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786959 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786976 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.786994 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787011 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787028 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787043 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787063 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787083 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787101 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787119 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787137 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787527 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787588 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787637 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787692 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787731 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787765 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787796 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787846 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787883 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787918 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787951 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787983 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.787999 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.788025 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.788055 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.888440 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.889616 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.889651 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.889662 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.889688 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: E1125 12:27:35.890063 4675 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.9:6443: connect: connection refused" node="crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.963697 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.972401 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 12:27:35 crc kubenswrapper[4675]: I1125 12:27:35.990928 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.005232 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-24141ddd13aca3e45ad53b4259e5fe2705fca501bba32db2242ce527b273216f WatchSource:0}: Error finding container 24141ddd13aca3e45ad53b4259e5fe2705fca501bba32db2242ce527b273216f: Status 404 returned error can't find the container with id 24141ddd13aca3e45ad53b4259e5fe2705fca501bba32db2242ce527b273216f
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.014209 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.018832 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.037325 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-7b64012db0f7bd81564a24a3e230e3467d684ad6f7d435ee7c93cbe5231cfb3c WatchSource:0}: Error finding container 7b64012db0f7bd81564a24a3e230e3467d684ad6f7d435ee7c93cbe5231cfb3c: Status 404 returned error can't find the container with id 7b64012db0f7bd81564a24a3e230e3467d684ad6f7d435ee7c93cbe5231cfb3c
Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.039041 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-9ba46fd1d9a40d1920f204b26ac2f63700386a1ee77ed7031186cbabd7d0ca26 WatchSource:0}: Error finding container 9ba46fd1d9a40d1920f204b26ac2f63700386a1ee77ed7031186cbabd7d0ca26: Status 404 returned error can't find the container with id 9ba46fd1d9a40d1920f204b26ac2f63700386a1ee77ed7031186cbabd7d0ca26
Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.075069 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="800ms"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.290520 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.291997 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.292030 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.292041 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.292063 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.292505 4675 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.9:6443: connect: connection refused" node="crc"
Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.305128 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused
Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.305276 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError"
Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.360354 4675
reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.360486 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.466270 4675 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.524425 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.524488 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:36 crc kubenswrapper[4675]: W1125 12:27:36.531028 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.531065 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.534847 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9ba46fd1d9a40d1920f204b26ac2f63700386a1ee77ed7031186cbabd7d0ca26"} Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.535598 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7b64012db0f7bd81564a24a3e230e3467d684ad6f7d435ee7c93cbe5231cfb3c"} Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.540549 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e445791b0f0ad24b9b8bcf672b95201cdfdad8de66837d89195cb723ddd3aa53"} Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.542082 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"24141ddd13aca3e45ad53b4259e5fe2705fca501bba32db2242ce527b273216f"} Nov 25 12:27:36 crc kubenswrapper[4675]: I1125 12:27:36.542873 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"f8e6adb4bee1964797455bcb4431eb47decde29fddffbfe8c0df5d0f1e0e4bb8"} Nov 25 12:27:36 crc kubenswrapper[4675]: E1125 12:27:36.876467 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="1.6s" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.092986 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.094100 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.094132 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.094144 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.094167 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 12:27:37 crc kubenswrapper[4675]: E1125 12:27:37.094588 4675 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.9:6443: connect: connection refused" node="crc" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.465874 4675 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.547322 4675 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2" exitCode=0 Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.547420 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.547419 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.548157 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.548191 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.548203 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.549304 4675 generic.go:334] "Generic (PLEG): container finished" 
podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6" exitCode=0 Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.549430 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.549724 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.550221 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.550236 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.550245 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.552057 4675 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d" exitCode=0 Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.552111 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.552204 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.554174 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.554207 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.554221 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.555597 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.555637 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.555653 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.555667 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.555620 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556457 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556489 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556583 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3" exitCode=0 Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556613 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3"} Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.556706 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.557315 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.557340 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.557349 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.563944 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.564649 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.564677 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:37 crc kubenswrapper[4675]: I1125 12:27:37.564686 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: E1125 12:27:38.134629 4675 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.9:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b3fa4a78a5670 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 12:27:35.463728752 +0000 UTC m=+0.635321093,LastTimestamp:2025-11-25 12:27:35.463728752 +0000 UTC 
m=+0.635321093,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 12:27:38 crc kubenswrapper[4675]: W1125 12:27:38.332500 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:38 crc kubenswrapper[4675]: E1125 12:27:38.332767 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.466084 4675 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:38 crc kubenswrapper[4675]: E1125 12:27:38.477902 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="3.2s" Nov 25 12:27:38 crc kubenswrapper[4675]: W1125 12:27:38.522747 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:38 crc kubenswrapper[4675]: E1125 12:27:38.522843 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.560421 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.560466 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.561425 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.561457 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.561469 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.562439 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.562461 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.562471 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.562559 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.563383 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.563459 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.563513 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.565505 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.565621 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.565688 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.565765 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.566997 4675 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6" exitCode=0 Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.567281 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.567199 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.567130 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6"} Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568273 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568290 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568615 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568630 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568304 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.568831 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.695794 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.696807 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.696849 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.696860 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.696882 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 12:27:38 crc kubenswrapper[4675]: E1125 12:27:38.697269 4675 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.9:6443: connect: connection refused" node="crc" Nov 25 12:27:38 crc kubenswrapper[4675]: I1125 12:27:38.803736 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:39 crc kubenswrapper[4675]: W1125 12:27:39.235211 4675 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.9:6443: connect: connection refused Nov 25 12:27:39 crc kubenswrapper[4675]: E1125 12:27:39.235301 4675 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.9:6443: connect: connection refused" logger="UnhandledError" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.573044 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.575599 4675 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c" exitCode=255 Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.575657 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c"} Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.575809 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.577393 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.577457 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.577476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.578124 4675 scope.go:117] "RemoveContainer" containerID="e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582545 4675 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec" exitCode=0 Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582700 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582725 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582703 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec"} Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582726 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.583006 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.582926 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592492 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592523 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592597 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592617 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592642 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592650 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592662 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592715 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592523 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592804 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.592847 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:39 crc kubenswrapper[4675]: I1125 12:27:39.636317 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.197417 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591136 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591178 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591188 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591199 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591208 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.591328 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.592225 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.592245 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.592253 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.593949 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.595594 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8"} Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.595621 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.595624 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.596727 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.596770 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.596782 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.597364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.597548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:40 crc kubenswrapper[4675]: I1125 12:27:40.597563 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.006399 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.006644 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.008229 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.008277 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.008294 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.598684 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.598805 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.599524 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 12:27:41 crc 
kubenswrapper[4675]: I1125 12:27:41.600182 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.600227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.600249 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.600275 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.600305 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.600329 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.716280 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.716401 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.717720 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.717774 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.717796 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.723919 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.837969 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.897849 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.900251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.900331 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.900352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:41 crc kubenswrapper[4675]: I1125 12:27:41.900412 4675 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.560141 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.600729 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.600735 4675 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.601577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.601623 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.601633 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.602032 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.602067 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:42 crc kubenswrapper[4675]: I1125 12:27:42.602079 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.602965 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.603647 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.603896 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.603938 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.603953 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.605256 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.605275 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:43 crc kubenswrapper[4675]: I1125 12:27:43.605298 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:44 crc kubenswrapper[4675]: I1125 12:27:44.006486 4675 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 12:27:44 crc kubenswrapper[4675]: I1125 12:27:44.006572 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:27:45 crc kubenswrapper[4675]: I1125 12:27:45.242250 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 12:27:45 crc kubenswrapper[4675]: I1125 12:27:45.242530 4675 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:45 crc kubenswrapper[4675]: I1125 12:27:45.243970 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:45 crc kubenswrapper[4675]: I1125 12:27:45.244012 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:45 crc kubenswrapper[4675]: I1125 12:27:45.244028 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:45 crc kubenswrapper[4675]: E1125 12:27:45.593319 4675 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 12:27:47 crc kubenswrapper[4675]: I1125 12:27:47.574718 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 12:27:47 crc kubenswrapper[4675]: I1125 12:27:47.574987 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:47 crc kubenswrapper[4675]: I1125 12:27:47.576088 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:47 crc kubenswrapper[4675]: I1125 12:27:47.576198 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:47 crc kubenswrapper[4675]: I1125 12:27:47.576276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:48 crc kubenswrapper[4675]: I1125 12:27:48.808797 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:27:48 crc kubenswrapper[4675]: I1125 12:27:48.808955 4675 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 12:27:48 crc kubenswrapper[4675]: I1125 12:27:48.810478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:48 crc kubenswrapper[4675]: I1125 12:27:48.810514 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:48 crc kubenswrapper[4675]: I1125 12:27:48.810525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.467002 4675 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.545751 4675 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.545805 4675 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.565288 4675 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.565344 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.642129 4675 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]log ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]etcd ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-filter ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-apiextensions-informers ok Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/crd-informer-synced failed: reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-system-namespaces-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/rbac/bootstrap-roles failed: 
reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/bootstrap-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/start-kube-aggregator-informers ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/apiservice-registration-controller failed: reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 25 12:27:49 crc kubenswrapper[4675]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]autoregister-completion ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/apiservice-openapi-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 25 12:27:49 crc kubenswrapper[4675]: livez check failed Nov 25 12:27:49 crc kubenswrapper[4675]: I1125 12:27:49.642192 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:27:50 crc kubenswrapper[4675]: I1125 12:27:50.198306 4675 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 12:27:50 crc kubenswrapper[4675]: I1125 12:27:50.198400 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.008223 4675 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.008292 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.545224 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.550901 4675 trace.go:236] Trace[506602443]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 12:27:44.531) (total time: 10019ms): Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[506602443]: ---"Objects listed" error: 10019ms (12:27:54.550) Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[506602443]: [10.019200563s] [10.019200563s] END Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.550930 4675 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.551974 4675 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.552378 4675 trace.go:236] Trace[1835437219]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 12:27:43.912) (total time: 10640ms): Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[1835437219]: ---"Objects listed" error: 10640ms (12:27:54.552) Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[1835437219]: [10.640072081s] [10.640072081s] END Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.552398 4675 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.552475 4675 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.552543 4675 trace.go:236] Trace[1361169530]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 12:27:39.586) (total time: 14966ms): Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[1361169530]: ---"Objects listed" error: 14966ms (12:27:54.552) Nov 25 12:27:54 crc kubenswrapper[4675]: Trace[1361169530]: [14.966159401s] [14.966159401s] END Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.552561 4675 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.569744 4675 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.570048 4675 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.571392 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.571426 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.571437 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.571462 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.571475 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.597390 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"si
zeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.604208 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.604265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 
12:27:54.604276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.604296 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.604308 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.625439 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [... status patch payload elided; byte-for-byte identical to the 12:27:54.597390 entry above ...] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.630507 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.630563 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
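[editor's note] Every "Error updating node status" entry in this stretch bottoms out in the same root cause: the node-status PATCH is intercepted by the node.network-node-identity.openshift.io admission webhook, and the webhook endpoint at 127.0.0.1:9743 is not listening yet ("connect: connection refused"). A quick on-host check of that specific failure is a bare TCP dial, sketched below in Go; the address and port come from the log, everything else is illustrative.

    // webhookdial.go - reproduce the failing connectivity check behind the
    // "failed calling webhook" errors: a plain TCP dial to the webhook
    // endpoint named in the log. Connection refused means the
    // network-node-identity listener is not up, so node status patches fail.
    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 3*time.Second)
        if err != nil {
            // Matches the log's "dial tcp 127.0.0.1:9743: connect: connection refused".
            fmt.Println("webhook endpoint unreachable:", err)
            return
        }
        conn.Close()
        fmt.Println("webhook endpoint is accepting connections")
    }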
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.630575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.630594 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.630606 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.643054 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.644627 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [... status patch payload elided; byte-for-byte identical to the 12:27:54.597390 entry above ...] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.648646 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.649850 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.649885 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.649894 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.649908 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.649917 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.662496 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [... status patch payload elided; byte-for-byte identical to the 12:27:54.597390 entry above ...] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.665626 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.665684 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
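[editor's note] The recurring NodeNotReady condition in these entries always carries the same message: no CNI configuration file in /etc/kubernetes/cni/net.d/, so the kubelet reports NetworkReady=false until the network plugin writes its config. A sketch of the corresponding on-host check follows; the directory comes straight from the log message, while treating .conf/.conflist/.json files as candidate configs mirrors common CNI loader behavior and is an assumption here.

    // cnicheck.go - look for CNI network configs in the directory the kubelet
    // complains about (/etc/kubernetes/cni/net.d/, from the NodeNotReady
    // message). An empty or missing directory is consistent with
    // NetworkReady=false / NetworkPluginNotReady.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        const dir = "/etc/kubernetes/cni/net.d"
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        found := 0
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // extensions CNI loaders typically accept (assumption)
                fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
                found++
            }
        }
        if found == 0 {
            fmt.Println("no CNI configuration files - the network plugin has not written its config yet")
        }
    }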
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.665695 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.665712 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.665723 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.678607 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:54Z\\\",\\\"message\\\":\\\"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?, CSINode is not yet initialized]\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"si
zeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:54 crc kubenswrapper[4675]: E1125 12:27:54.678759 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.680635 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 
12:27:54.680669 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.680680 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.680700 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.680712 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.782495 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.782525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.782535 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.782563 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.782573 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.884619 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.884656 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.884665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.884680 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.884688 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.986865 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.986911 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.986929 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.986948 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:54 crc kubenswrapper[4675]: I1125 12:27:54.986959 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:54Z","lastTransitionTime":"2025-11-25T12:27:54Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.047849 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.089204 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.089240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.089251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.089271 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.089281 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.191586 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.191842 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.191945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.192045 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.192131 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.294805 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.294879 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.294892 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.294914 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.294928 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.397600 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.397636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.397646 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.397663 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.397673 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.458877 4675 apiserver.go:52] "Watching apiserver"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.461201 4675 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.461499 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-xkhpr","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"]
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.462174 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-xkhpr"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.462691 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.462792 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.462987 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.462713 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.463636 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.463704 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.463727 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.463842 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.464490 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.467054 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.467126 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.467127 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.467843 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.468731 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.468844 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.469385 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.470573 4675 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.476261 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.476277 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.477006 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.477082 4675
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.477204 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.493140 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.500903 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.500956 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.500967 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.500986 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.500998 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.508114 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.519762 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.532738 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.545119 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.553228 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560384 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560442 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560466 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560493 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560517 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560539 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561627 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560805 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.560975 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561016 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561369 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561555 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561848 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.561920 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562080 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562104 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562125 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562330 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562147 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562400 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562627 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562660 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562688 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562693 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562698 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562775 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562797 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562828 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562844 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562861 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562875 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562890 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562905 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562920 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562935 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562956 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.562997 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563025 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563051 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563069 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563090 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563111 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563133 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563152 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563173 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563194 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563214 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563235 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563262 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563280 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563298 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563315 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563331 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563346 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563366 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563387 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563403 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563417 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563431 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563446 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563460 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563475 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563490 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563505 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563540 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563564 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563580 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563807 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563839 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564133 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: 
I1125 12:27:55.564148 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564164 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564190 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564696 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564734 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564758 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564780 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564808 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564849 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564876 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 
25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564929 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564952 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564978 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565001 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565024 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565051 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565075 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565098 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565124 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565147 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: 
\"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565169 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565191 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565213 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565247 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565268 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565290 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565313 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565339 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565362 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565382 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod 
\"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565403 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565429 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565445 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565461 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565480 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565495 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565510 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565525 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565541 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565558 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565575 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565591 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565607 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565622 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565638 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565653 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565670 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565705 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565752 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565772 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565795 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565827 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565846 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565861 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565877 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565894 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565910 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565926 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565943 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565957 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566003 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566023 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566057 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566079 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566099 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566117 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566133 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566149 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566164 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: 
I1125 12:27:55.566185 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566250 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566276 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566291 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566307 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566323 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566340 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566356 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566372 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566387 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 
12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566403 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566419 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566434 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566449 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566465 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566481 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566496 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566521 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566544 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566568 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 
12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566592 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566613 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566634 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566674 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566690 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566707 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566725 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566744 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566761 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566777 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566830 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566849 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566866 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566888 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566906 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566925 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566942 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566959 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566976 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod 
\"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566992 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567008 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567024 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567041 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567058 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567074 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567096 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567119 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567220 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567239 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567259 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567276 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567293 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567310 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567327 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567343 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567359 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567376 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567392 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567407 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567423 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567440 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567458 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567480 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567502 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567526 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567580 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567612 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567630 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567648 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567671 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567689 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567709 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b67hl\" (UniqueName: \"kubernetes.io/projected/f78a3216-c1be-4cef-bf38-12b24f061f07-kube-api-access-b67hl\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567728 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567745 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567764 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567781 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f78a3216-c1be-4cef-bf38-12b24f061f07-hosts-file\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567798 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568098 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568124 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568145 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568165 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568218 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568230 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568241 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568251 4675 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568260 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568271 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: 
\"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568281 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568290 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568300 4675 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568310 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.572004 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.593319 4675 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.595743 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563360 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.563738 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564036 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564095 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564133 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564180 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564592 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564643 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564739 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.564808 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565112 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565205 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565219 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565307 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.565637 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.566337 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567386 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567406 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567560 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.567711 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.568007 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.570706 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.571026 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.571294 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.571764 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.580137 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600019 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600023 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.580318 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.581232 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.581681 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600061 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600051 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.582043 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.582562 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.583543 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.583543 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.583603 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.583749 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.583950 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.584435 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.584657 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585020 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585040 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585254 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585703 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585902 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.587254 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.587535 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.587596 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.587851 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.588485 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.588896 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589089 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589126 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589166 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589393 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589410 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). 
InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589518 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.589541 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.590089 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.590703 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591064 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591334 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591616 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591669 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591729 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.591921 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592036 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592192 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592192 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592262 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592279 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592383 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592391 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592582 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592648 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592665 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592593 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.592893 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.593006 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.593287 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.593845 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594073 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594149 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594196 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594381 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594389 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594432 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.594602 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.595030 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.595284 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.585850 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.587724 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.595476 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.595523 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.595653 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:27:56.095630069 +0000 UTC m=+21.267222410 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.595668 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.596146 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.596161 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.597755 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.597801 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.597952 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.598036 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.598092 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.598288 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.598455 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599013 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599029 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599056 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599126 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599143 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599175 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599177 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599268 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599278 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599624 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.599656 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600117 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600230 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.600361 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.602945 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.604042 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.604229 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.607131 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.608258 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.608612 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.608639 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.608650 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.604311 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.605530 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.605690 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.605893 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.606111 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.606302 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.609864 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:56.109843443 +0000 UTC m=+21.281435784 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.609894 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:56.109887635 +0000 UTC m=+21.281479976 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.609907 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:56.109900395 +0000 UTC m=+21.281492736 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.609807 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.613271 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.614914 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.614947 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.614971 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.614985 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.619137 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.619083 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.628001 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.628202 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.628227 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.628267 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.628342 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:56.128303831 +0000 UTC m=+21.299896172 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.628540 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.631372 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.636460 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: E1125 12:27:55.638802 4675 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.670179 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b67hl\" (UniqueName: \"kubernetes.io/projected/f78a3216-c1be-4cef-bf38-12b24f061f07-kube-api-access-b67hl\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.670459 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f78a3216-c1be-4cef-bf38-12b24f061f07-hosts-file\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.670586 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.671078 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.670586 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/f78a3216-c1be-4cef-bf38-12b24f061f07-hosts-file\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.671257 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.670653 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.671658 4675 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.671805 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672020 4675 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672105 4675 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672182 4675 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672264 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672338 4675 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.672555 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673406 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673438 4675 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673454 4675 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673469 4675 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673482 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673496 4675 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673508 4675 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673520 4675 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673533 4675 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673543 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673554 4675 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673565 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673576 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673589 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673600 4675 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673611 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673623 4675 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673635 4675 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673645 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673657 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673667 4675 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673678 4675 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673690 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673703 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673714 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673726 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673737 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673748 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673758 4675 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673770 4675 reconciler_common.go:293] 
"Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673781 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673791 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673802 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673831 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673843 4675 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673854 4675 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673866 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673876 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673887 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673898 4675 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673909 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673924 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc 
kubenswrapper[4675]: I1125 12:27:55.673934 4675 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673943 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673953 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673986 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.673997 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674044 4675 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674055 4675 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674066 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674077 4675 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674088 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674099 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674111 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674122 4675 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674133 4675 reconciler_common.go:293] "Volume detached for volume 
\"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674145 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674157 4675 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674168 4675 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674179 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674191 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674201 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674212 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674225 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674237 4675 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674249 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674262 4675 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674274 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674284 4675 reconciler_common.go:293] 
"Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674294 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674303 4675 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674314 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674324 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674334 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674343 4675 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674355 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674367 4675 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674379 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674392 4675 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674403 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674414 4675 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: 
I1125 12:27:55.674424 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674436 4675 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674446 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674456 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674468 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674480 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674491 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674501 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674511 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674524 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674538 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674549 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674560 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674571 4675 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674581 4675 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674594 4675 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674605 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674617 4675 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674628 4675 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674638 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674649 4675 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674660 4675 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674671 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674684 4675 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674695 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674705 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674716 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674726 4675 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674737 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674749 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674760 4675 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674772 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674782 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674792 4675 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674804 4675 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674834 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674846 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674857 4675 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674867 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.674880 4675 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678254 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678322 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678469 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678639 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678837 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678902 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678919 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.678970 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679154 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679246 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679585 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679617 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680160 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679632 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679882 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680191 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680023 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680067 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680106 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.679808 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680472 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680681 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680775 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682109 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682255 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.680785 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681041 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681471 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681571 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681720 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681788 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.681907 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682657 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682736 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682751 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682763 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682826 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682878 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.682948 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.683010 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.683294 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.683526 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.683586 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.684442 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.684881 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.685025 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.685139 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.685215 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.686782 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.686826 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.687229 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.687870 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.697184 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b67hl\" (UniqueName: \"kubernetes.io/projected/f78a3216-c1be-4cef-bf38-12b24f061f07-kube-api-access-b67hl\") pod \"node-resolver-xkhpr\" (UID: \"f78a3216-c1be-4cef-bf38-12b24f061f07\") " pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.699038 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.701733 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.705279 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.709192 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.709742 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.715454 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718585 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718626 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718664 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718681 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718694 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.718888 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.721255 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.728599 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.750275 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.763138 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776223 4675 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776252 4675 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776262 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776271 4675 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776279 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776288 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776297 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776306 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776314 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776322 4675 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776331 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776339 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776347 4675 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776355 4675 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776363 4675 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776371 4675 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776379 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776386 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776394 4675 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776402 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776409 4675 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776420 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776428 4675 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776436 4675 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776444 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776453 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776461 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776469 4675 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776478 4675 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776485 4675 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776494 4675 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776503 4675 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776511 4675 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776520 4675 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776529 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776537 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") 
on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776544 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776552 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776560 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776568 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776576 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776584 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776591 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776600 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776607 4675 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776615 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776623 4675 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776631 4675 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776639 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" 
DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776648 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776656 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776665 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776674 4675 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776684 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776693 4675 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.776743 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-xkhpr" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.779653 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.785974 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 12:27:55 crc kubenswrapper[4675]: W1125 12:27:55.788439 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf78a3216_c1be_4cef_bf38_12b24f061f07.slice/crio-faaf1437baa37c5fa1465b1497447426dbfd5e5603dde228ed414879e060e6ef WatchSource:0}: Error finding container faaf1437baa37c5fa1465b1497447426dbfd5e5603dde228ed414879e060e6ef: Status 404 returned error can't find the container with id faaf1437baa37c5fa1465b1497447426dbfd5e5603dde228ed414879e060e6ef Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.789159 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.792725 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.799500 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.801881 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: W1125 12:27:55.812610 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-0eb1614982eff52b68126b346aab4a5cf7c035674a4e1bd7dd4ee22acffa6702 WatchSource:0}: Error finding container 0eb1614982eff52b68126b346aab4a5cf7c035674a4e1bd7dd4ee22acffa6702: Status 404 returned error can't find the container with id 0eb1614982eff52b68126b346aab4a5cf7c035674a4e1bd7dd4ee22acffa6702 Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.821001 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: W1125 12:27:55.824013 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-81a8176640f9df1f24f65f0555ffebff1726d5f9e06ad5b13e54fb46a72b4c0c WatchSource:0}: Error finding container 81a8176640f9df1f24f65f0555ffebff1726d5f9e06ad5b13e54fb46a72b4c0c: Status 404 returned error can't find the container with id 81a8176640f9df1f24f65f0555ffebff1726d5f9e06ad5b13e54fb46a72b4c0c Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.825274 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.825302 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.825310 
4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.825323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.825333 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.835173 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.848940 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.866687 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.875671 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.927050 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.927087 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.927099 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.927116 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:55 crc kubenswrapper[4675]: I1125 12:27:55.927128 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:55Z","lastTransitionTime":"2025-11-25T12:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.030545 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.030699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.030786 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.030888 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.030974 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.133510 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.133551 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.133561 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.133578 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.133588 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.181112 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.181188 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.181220 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.181246 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.181270 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181331 4675 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:27:57.181299737 +0000 UTC m=+22.352892078 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181387 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181389 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181422 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181396 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181442 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181449 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:57.181429641 +0000 UTC m=+22.353022062 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181456 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181464 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181470 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:57.181461932 +0000 UTC m=+22.353054423 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181479 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181493 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:57.181484603 +0000 UTC m=+22.353077054 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:56 crc kubenswrapper[4675]: E1125 12:27:56.181516 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:57.181503473 +0000 UTC m=+22.353095824 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.236281 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.236312 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.236322 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.236338 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.236350 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.339278 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.339310 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.339319 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.339334 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.339344 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.384235 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-n7t8r"] Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.384590 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-cgbpj"] Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.384753 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-56fmx"] Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.384778 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.384950 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.385617 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.386914 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.387092 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.387257 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.387377 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.387538 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.387850 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.388608 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.388622 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.388752 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.388831 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.388958 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.391709 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.407010 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.418726 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.427698 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.439726 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.441225 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.441361 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.441461 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.441534 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.441591 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.456931 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.480215 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-os-release\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484501 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-netns\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484517 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-cnibin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484533 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-multus-daemon-config\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484547 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhml4\" (UniqueName: \"kubernetes.io/projected/ede74da4-0d3a-463f-a591-b722f62358c8-kube-api-access-lhml4\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484562 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-system-cni-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484576 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-os-release\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484646 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484700 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kctvj\" (UniqueName: \"kubernetes.io/projected/e2e07bd2-ea2f-48da-9358-49fed47fa922-kube-api-access-kctvj\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484734 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484758 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-etc-kubernetes\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484790 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-multus\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484836 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-cni-binary-copy\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484871 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-hostroot\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484887 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-conf-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484902 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2e07bd2-ea2f-48da-9358-49fed47fa922-mcd-auth-proxy-config\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484925 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2e07bd2-ea2f-48da-9358-49fed47fa922-proxy-tls\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.484978 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-socket-dir-parent\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485025 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485082 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldwpk\" (UniqueName: \"kubernetes.io/projected/31ed2ad2-a571-44ac-9f18-afd71427fd7a-kube-api-access-ldwpk\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485109 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e2e07bd2-ea2f-48da-9358-49fed47fa922-rootfs\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485134 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cnibin\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485165 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-multus-certs\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485206 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: 
\"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-k8s-cni-cncf-io\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485227 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-system-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485256 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-bin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485275 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-binary-copy\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.485308 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-kubelet\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.499503 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.511806 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.525787 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.536404 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.543155 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.543196 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.543208 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.543223 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.543235 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.544624 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.554560 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.562004 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.570391 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.578938 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585885 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2e07bd2-ea2f-48da-9358-49fed47fa922-proxy-tls\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585923 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-socket-dir-parent\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585940 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585961 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldwpk\" (UniqueName: \"kubernetes.io/projected/31ed2ad2-a571-44ac-9f18-afd71427fd7a-kube-api-access-ldwpk\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585978 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: 
\"kubernetes.io/host-path/e2e07bd2-ea2f-48da-9358-49fed47fa922-rootfs\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585992 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cnibin\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586009 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-multus-certs\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586200 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-k8s-cni-cncf-io\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586209 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-socket-dir-parent\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586219 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-system-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586240 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-bin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586266 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-binary-copy\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586289 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-kubelet\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586312 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-netns\") pod 
\"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586365 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-os-release\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586381 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-cnibin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586398 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-os-release\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586413 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-multus-daemon-config\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586427 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-system-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586462 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhml4\" (UniqueName: \"kubernetes.io/projected/ede74da4-0d3a-463f-a591-b722f62358c8-kube-api-access-lhml4\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586491 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e2e07bd2-ea2f-48da-9358-49fed47fa922-rootfs\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586496 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-system-cni-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586516 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cnibin\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 
12:27:56.586513 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-multus-certs\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586546 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586580 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-etc-kubernetes\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586605 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kctvj\" (UniqueName: \"kubernetes.io/projected/e2e07bd2-ea2f-48da-9358-49fed47fa922-kube-api-access-kctvj\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586636 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-multus\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586649 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-cni-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586653 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-cni-binary-copy\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586706 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-hostroot\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586727 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586747 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-conf-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586785 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2e07bd2-ea2f-48da-9358-49fed47fa922-mcd-auth-proxy-config\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586789 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-cnibin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586844 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-bin\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.586900 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-os-release\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587280 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-cni-binary-copy\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.585998 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587340 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-multus-conf-dir\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587391 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-hostroot\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587417 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-netns\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587444 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-kubelet\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587464 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-etc-kubernetes\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587469 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-binary-copy\") pod 
\"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587483 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-run-k8s-cni-cncf-io\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587512 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ede74da4-0d3a-463f-a591-b722f62358c8-host-var-lib-cni-multus\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587526 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-os-release\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587539 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/31ed2ad2-a571-44ac-9f18-afd71427fd7a-system-cni-dir\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.587994 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2e07bd2-ea2f-48da-9358-49fed47fa922-mcd-auth-proxy-config\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.588228 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ede74da4-0d3a-463f-a591-b722f62358c8-multus-daemon-config\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.588473 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/31ed2ad2-a571-44ac-9f18-afd71427fd7a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.589347 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2e07bd2-ea2f-48da-9358-49fed47fa922-proxy-tls\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.600317 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.601594 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhml4\" (UniqueName: \"kubernetes.io/projected/ede74da4-0d3a-463f-a591-b722f62358c8-kube-api-access-lhml4\") pod \"multus-cgbpj\" (UID: \"ede74da4-0d3a-463f-a591-b722f62358c8\") " pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.601952 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldwpk\" (UniqueName: \"kubernetes.io/projected/31ed2ad2-a571-44ac-9f18-afd71427fd7a-kube-api-access-ldwpk\") pod \"multus-additional-cni-plugins-56fmx\" (UID: \"31ed2ad2-a571-44ac-9f18-afd71427fd7a\") " pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.603536 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kctvj\" (UniqueName: \"kubernetes.io/projected/e2e07bd2-ea2f-48da-9358-49fed47fa922-kube-api-access-kctvj\") pod \"machine-config-daemon-n7t8r\" (UID: \"e2e07bd2-ea2f-48da-9358-49fed47fa922\") " pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.610732 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.620134 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.629998 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.635794 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xkhpr" event={"ID":"f78a3216-c1be-4cef-bf38-12b24f061f07","Type":"ContainerStarted","Data":"99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.635870 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xkhpr" event={"ID":"f78a3216-c1be-4cef-bf38-12b24f061f07","Type":"ContainerStarted","Data":"faaf1437baa37c5fa1465b1497447426dbfd5e5603dde228ed414879e060e6ef"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.636929 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.636982 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"0eb1614982eff52b68126b346aab4a5cf7c035674a4e1bd7dd4ee22acffa6702"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.638217 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.638301 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.638385 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4112e06d20712eed542aa70527e42eceac070072fa4df0b963fe7770abd357ad"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.641055 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"81a8176640f9df1f24f65f0555ffebff1726d5f9e06ad5b13e54fb46a72b4c0c"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.645418 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.645452 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.645463 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.645479 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.645491 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.650073 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.660240 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.671475 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.683885 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.693978 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.699373 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-cgbpj" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.706241 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-56fmx" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.706844 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: W1125 12:27:56.709373 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podede74da4_0d3a_463f_a591_b722f62358c8.slice/crio-ef6e84e24a483f9272dc92169c9096305effe922b5ad641155e761bbd72786ae WatchSource:0}: Error finding container ef6e84e24a483f9272dc92169c9096305effe922b5ad641155e761bbd72786ae: Status 404 returned error can't find the container with id ef6e84e24a483f9272dc92169c9096305effe922b5ad641155e761bbd72786ae Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.712419 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.723599 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.742100 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.747368 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.747628 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.747708 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.747793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.747891 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.755764 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.769094 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gv9qh"] Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.769842 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773209 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773427 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773523 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773643 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773782 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.773921 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.774024 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.777618 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.793550 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.807852 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.817730 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.830466 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.842922 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.851168 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.851462 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.851475 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.851492 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.851504 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.859078 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.879497 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889457 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889495 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889515 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889535 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889550 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889576 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889591 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889604 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889624 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889646 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn\") pod 
\"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889662 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889680 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889705 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889721 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrftd\" (UniqueName: \"kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889737 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889754 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889772 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889794 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889842 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.889862 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.891145 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.906219 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.918414 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954493 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954534 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954547 4675 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954564 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954576 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:56Z","lastTransitionTime":"2025-11-25T12:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.954592 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\
\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.989450 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:56Z is after 2025-08-24T17:21:41Z"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990400 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990438 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990462 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990516 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990556 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990556 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990581 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990603 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990639 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990661 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990683 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990713 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990739 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990760 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990780 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990807 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990864 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrftd\" (UniqueName: \"kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990889 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990914 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.990937 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991006 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991051 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991227 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991229 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991252 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991289 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991315 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991327 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991345 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991362 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991389 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991416 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991438 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991459 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991480 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991752 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:56 crc kubenswrapper[4675]: I1125 12:27:56.991959 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:56.998687 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.037600 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrftd\" (UniqueName: \"kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd\") pod \"ovnkube-node-gv9qh\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.050384 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.056866 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.056906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.056917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.056937 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.056951 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.101639 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:27:57 crc kubenswrapper[4675]: W1125 12:27:57.113291 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5671459_4981_4259_a31d_595dd6f1f4b3.slice/crio-12600a85ff57348c06651f35503103bfc376b08171c8dad4d0502c5a9d32711a WatchSource:0}: Error finding container 12600a85ff57348c06651f35503103bfc376b08171c8dad4d0502c5a9d32711a: Status 404 returned error can't find the container with id 12600a85ff57348c06651f35503103bfc376b08171c8dad4d0502c5a9d32711a
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.159078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.159108 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.159119 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.159135 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.159147 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.193219 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.193307 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.193330 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.193348 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.193369 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193417 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:27:59.193389533 +0000 UTC m=+24.364981884 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193436 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193465 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193500 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193501 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:59.193489177 +0000 UTC m=+24.365081588 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193517 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193524 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193534 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193558 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193567 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:59.193550288 +0000 UTC m=+24.365142629 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193572 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193622 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:59.19360597 +0000 UTC m=+24.365198391 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.193640 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:27:59.193632531 +0000 UTC m=+24.365224972 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.261112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.261153 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.261163 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.261179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.261191 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.363602 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.363639 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.363650 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.363666 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.363677 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.467274 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.467311 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.467321 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.467337 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.467347 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.531358 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.531475 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.531734 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.531782 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.531827 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:27:57 crc kubenswrapper[4675]: E1125 12:27:57.531870 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.535196 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.535781 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.536694 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.537417 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.538120 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.538629 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.539298 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.539985 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.540750 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.541383 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.542076 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.544882 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.545424 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.546078 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.547049 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.547593 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.549734 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.550185 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.550793 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.551412 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.551873 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.552658 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.553113 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.553718 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.554129 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.554726 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.558314 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.558982 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.559723 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.560429 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.561834 4675 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.561954 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.564058 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.565330 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.565904 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.567877 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569221 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569545 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569610 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.569618 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.570355 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.571267 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.573458 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.573997 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.575383 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.576077 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.577008 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.577893 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.578793 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.579342 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.580410 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.580910 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.581694 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.582843 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.583347 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.584269 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.584699 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.623228 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.635996 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.639545 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.644219 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.644318 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db" exitCode=0
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.644369 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.644390 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerStarted","Data":"fe7184ec126272332d620e7512bef4d1658e95bc7fcbb2ab5285326217998449"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.645894 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" exitCode=0
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.645925 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.646297 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"12600a85ff57348c06651f35503103bfc376b08171c8dad4d0502c5a9d32711a"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.647614 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.647643 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.647654 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"7652fbda9a73a14c2451575c7ed54b7cf5051599c6e14e45d6bc36e5dea24e2f"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.648592 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerStarted","Data":"b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.648615 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerStarted","Data":"ef6e84e24a483f9272dc92169c9096305effe922b5ad641155e761bbd72786ae"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.655985 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.671838 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.672054 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.672146 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.672230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.672306 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.675239 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.690180 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.714690 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.729394 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.745892 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.764335 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.777053 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.777093 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.777102 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.777116 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.777129 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.784310 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.796748 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.809938 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.825958 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.838892 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.854622 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.866680 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.879245 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.879278 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.879289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.879303 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.879313 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.880896 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.900562 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.911753 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.924370 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.945697 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.958632 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.971793 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:57 crc 
kubenswrapper[4675]: I1125 12:27:57.981597 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.981646 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.981658 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.981677 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.981691 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:57Z","lastTransitionTime":"2025-11-25T12:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:57 crc kubenswrapper[4675]: I1125 12:27:57.989018 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\
\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.029454 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.067987 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.083364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.083397 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.083406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.083420 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.083430 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.186420 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.186654 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.186662 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.186673 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.186682 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.279381 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-bcd9v"] Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.279762 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.281558 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.281611 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.282805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.283459 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.288352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.288386 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.288397 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.288412 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.288422 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.299031 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.305543 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7wkj\" (UniqueName: \"kubernetes.io/projected/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-kube-api-access-g7wkj\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.305763 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-host\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.305928 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-serviceca\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.309147 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.323693 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.335231 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.348952 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.391482 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.391638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.391702 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.391801 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.391885 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.396663 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.406810 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7wkj\" (UniqueName: \"kubernetes.io/projected/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-kube-api-access-g7wkj\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.406862 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-host\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.406878 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-serviceca\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.407099 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-host\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.407787 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-serviceca\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 
12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.430069 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.457844 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7wkj\" (UniqueName: \"kubernetes.io/projected/59b10c95-6525-4e1b-a48d-7e1fb681b8e4-kube-api-access-g7wkj\") pod \"node-ca-bcd9v\" (UID: \"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\") " pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.490550 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"nam
e\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.494505 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.494542 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.494553 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.494570 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.494580 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.534563 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4
bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.568985 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.592646 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-bcd9v" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.596832 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.596973 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.597082 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.597172 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.597270 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: W1125 12:27:58.605442 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59b10c95_6525_4e1b_a48d_7e1fb681b8e4.slice/crio-868ad7e9a860954d261fe1e3fb989cf4e760eb8a007688cac26119cd19c6efaf WatchSource:0}: Error finding container 868ad7e9a860954d261fe1e3fb989cf4e760eb8a007688cac26119cd19c6efaf: Status 404 returned error can't find the container with id 868ad7e9a860954d261fe1e3fb989cf4e760eb8a007688cac26119cd19c6efaf Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.613792 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.652854 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.655606 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerStarted","Data":"a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.657152 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bcd9v" event={"ID":"59b10c95-6525-4e1b-a48d-7e1fb681b8e4","Type":"ContainerStarted","Data":"868ad7e9a860954d261fe1e3fb989cf4e760eb8a007688cac26119cd19c6efaf"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.659390 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.659414 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.663238 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.698712 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.699860 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.699891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.699902 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.699921 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.699934 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.737286 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.797549 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.801950 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.801972 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.801981 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.801993 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.802010 4675 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.815127 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.848897 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.889913 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.904544 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.904579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.904588 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.904603 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.904613 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:58Z","lastTransitionTime":"2025-11-25T12:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.928572 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:58 crc kubenswrapper[4675]: I1125 12:27:58.982847 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:58Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.007048 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.007289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.007356 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.007425 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.007539 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.016637 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.053066 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.094757 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.110088 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.110133 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.110144 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.110160 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.110172 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.128596 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.174697 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212753 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212789 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212800 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212849 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.212851 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.216203 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.216310 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.216342 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.216365 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216397 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:28:03.216370111 +0000 UTC m=+28.387962452 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.216428 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216478 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216551 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216725 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216743 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216484 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216483 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216903 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216914 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216561 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:03.216539786 +0000 UTC m=+28.388132207 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216971 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:03.216947669 +0000 UTC m=+28.388540010 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216988 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:03.21698014 +0000 UTC m=+28.388572481 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.216999 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:03.21699426 +0000 UTC m=+28.388586601 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.251733 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.289937 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.315771 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.315809 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.315862 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.315880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.315893 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.418538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.418688 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.418753 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.418836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.418895 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.520875 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.521167 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.521177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.521189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.521198 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.532227 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.532328 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.532507 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.532326 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.532751 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:27:59 crc kubenswrapper[4675]: E1125 12:27:59.532804 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.623592 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.623617 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.623627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.623641 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.623649 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.666270 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811" exitCode=0 Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.666344 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.674438 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bcd9v" event={"ID":"59b10c95-6525-4e1b-a48d-7e1fb681b8e4","Type":"ContainerStarted","Data":"21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.680497 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.680539 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.680552 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.689245 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.701520 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.712172 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.727556 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.729731 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.729768 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.729782 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.729858 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.729871 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.738763 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.751979 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.773028 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.793946 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.813101 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.826228 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.832349 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.832381 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.832390 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.832404 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.832413 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.842917 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.856150 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.871643 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.886521 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.900729 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.928397 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.935591 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.935619 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.935627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.935640 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.935650 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:27:59Z","lastTransitionTime":"2025-11-25T12:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:27:59 crc kubenswrapper[4675]: I1125 12:27:59.971795 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:27:59Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.010421 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.038945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.038988 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.039002 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.039028 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.039040 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.048302 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.093252 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.137218 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.141660 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.141687 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.141696 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.141710 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.141718 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.176905 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.211017 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.244165 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.244203 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.244212 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.244229 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.244238 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.248315 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.290222 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.328189 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.346138 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.346170 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.346178 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.346190 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.346199 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.369845 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.410688 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.448622 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.448658 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.448667 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.448681 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.448691 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.551441 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.551486 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.551500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.551518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.551551 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.653723 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.653762 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.653773 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.653791 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.653803 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.685782 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0" exitCode=0 Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.685860 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.688800 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.698913 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.713973 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.727442 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.741553 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.754346 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.755967 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.755997 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.756005 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.756019 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.756029 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.770875 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.789505 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.800595 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.813699 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.825257 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.849604 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.858216 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.858247 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.858259 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.858276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.858287 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.891495 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.928421 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.960214 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.960252 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.960263 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.960278 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.960289 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:00Z","lastTransitionTime":"2025-11-25T12:28:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:00 crc kubenswrapper[4675]: I1125 12:28:00.968190 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:00Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.011075 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.015621 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.018745 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.028137 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.062211 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.062239 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.062248 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.062261 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.062270 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.070893 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.109088 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.148615 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.164226 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.164251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.164259 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.164272 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.164281 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.190018 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.227031 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.266560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.266595 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.266604 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.266618 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.266628 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.269458 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.310489 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.348321 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.368870 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.368918 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.368929 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.368945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.368956 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.392129 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.429548 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.469126 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.470652 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.470689 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.470699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.470716 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.470726 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.515390 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.531894 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.531899 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:01 crc kubenswrapper[4675]: E1125 12:28:01.532058 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.531909 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:01 crc kubenswrapper[4675]: E1125 12:28:01.532183 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:01 crc kubenswrapper[4675]: E1125 12:28:01.532223 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.555593 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"
startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb9
4ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.573312 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.573351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.573362 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.573378 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.573389 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.588623 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.628728 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.669734 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.675317 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.675346 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.675358 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.675373 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.675383 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.694749 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3" exitCode=0 Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.694833 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.706739 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.750154 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.777555 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.777593 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.777605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.777621 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.777633 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.794734 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\
\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"contai
nerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.859701 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var
/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerI
D\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.880157 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.880211 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.880222 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.880244 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.880256 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.885505 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\
\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.911251 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.948719 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc
32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.982903 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.982964 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.982976 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.982998 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.983018 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:01Z","lastTransitionTime":"2025-11-25T12:28:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:01 crc kubenswrapper[4675]: I1125 12:28:01.991580 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:01Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.030504 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.070281 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.085999 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.086042 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.086054 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.086071 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.086083 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.114069 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.150365 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.188120 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.188158 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.188169 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.188185 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.188419 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.189092 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.230411 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.269953 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.290977 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.291017 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.291029 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.291045 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.291058 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.307742 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.351839 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.388102 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.393341 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.393469 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.393538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.393614 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.393676 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.432444 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.474254 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.495665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.495711 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.495744 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.495762 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.495774 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.515220 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731c
a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.552437 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.593924 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.597583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.597602 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.597610 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.597624 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.597633 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.635251 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.669477 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.698873 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.698898 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.698906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.698921 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.698933 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.700757 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98" exitCode=0 Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.700839 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.706161 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.715137 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.757705 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.788604 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.801747 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.802021 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.802120 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.802219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.802305 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.832525 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.871208 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.906772 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.906806 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.906836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.906853 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.906865 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:02Z","lastTransitionTime":"2025-11-25T12:28:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.907915 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.948844 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:02 crc kubenswrapper[4675]: I1125 12:28:02.996908 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:02Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.008745 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.008768 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.008776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.008789 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.008798 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.028246 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.070325 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.110917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.110954 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.110964 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.110982 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.110992 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.115532 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.148371 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.189362 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.213179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.213229 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.213241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.213259 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.213270 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.228510 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.254230 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.254294 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.254315 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.254331 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254374 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.254355385 +0000 UTC m=+36.425947736 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254396 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.254414 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254427 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.254419947 +0000 UTC m=+36.426012288 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254425 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254472 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.254461258 +0000 UTC m=+36.426053599 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254510 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254556 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254569 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254528 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254616 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.254602093 +0000 UTC m=+36.426194434 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254623 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254633 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.254664 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.254649144 +0000 UTC m=+36.426241485 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.268293 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.311549 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.315059 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.315092 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.315101 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.315114 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.315122 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.351761 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.417354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.417388 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.417396 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.417408 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.417417 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.521095 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.521135 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.521143 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.521158 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.521181 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.531633 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.531683 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.531641 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.531739 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.531890 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:03 crc kubenswrapper[4675]: E1125 12:28:03.532001 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.623172 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.623208 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.623219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.623235 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.623245 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.711808 4675 generic.go:334] "Generic (PLEG): container finished" podID="31ed2ad2-a571-44ac-9f18-afd71427fd7a" containerID="18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673" exitCode=0
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.711885 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerDied","Data":"18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673"}
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725701 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725725 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725733 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725717 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725746 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.725914 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.735060 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.746604 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.755864 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.766765 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.782853 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.792885 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.803208 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.818647 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.828381 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.828428 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.828437 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.828452 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.828460 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.830060 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.845435 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.864705 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.883000 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.909768 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.931276 4675 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.931320 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.931330 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.931348 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.931361 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:03Z","lastTransitionTime":"2025-11-25T12:28:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:03 crc kubenswrapper[4675]: I1125 12:28:03.951568 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:03Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.034009 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc 
kubenswrapper[4675]: I1125 12:28:04.034060 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.034078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.034095 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.034107 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.137710 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.138033 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.138046 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.138061 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.138071 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.240547 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.240579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.240587 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.240609 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.240618 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.342701 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.342735 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.342747 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.342760 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.342769 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.444724 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.444772 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.444788 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.444806 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.444839 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.548181 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.548227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.548238 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.548259 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.548276 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.651694 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.651765 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.651788 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.651858 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.651879 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.733218 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.733306 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.733329 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.733792 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.734159 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.751264 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:04Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.755597 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.755638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.755650 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.755668 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.755681 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.771523 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:04Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.775568 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.775599 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.775608 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.775622 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.775634 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.792741 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:04Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.796745 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.796792 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.796806 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.796843 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.796856 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.808529 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:04Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.813107 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.813164 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.813179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.813201 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.813218 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.826090 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:04Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:04 crc kubenswrapper[4675]: E1125 12:28:04.826262 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.827943 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.827972 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.827982 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.827998 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.828010 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.930090 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.930174 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.930193 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.930222 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:04 crc kubenswrapper[4675]: I1125 12:28:04.930241 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:04Z","lastTransitionTime":"2025-11-25T12:28:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.032722 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.032781 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.032792 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.032834 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.032846 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.136189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.136219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.136228 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.136241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.136250 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.238083 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.238159 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.238173 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.238191 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.238232 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.340193 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.340230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.340239 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.340252 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.340261 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.443486 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.443511 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.443519 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.443531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.443539 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.531929 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.531962 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.531947 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:05 crc kubenswrapper[4675]: E1125 12:28:05.532120 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:05 crc kubenswrapper[4675]: E1125 12:28:05.532043 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:05 crc kubenswrapper[4675]: E1125 12:28:05.532207 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.543468 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.545007 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.545035 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.545045 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.545057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.545065 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.552544 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.565853 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.579708 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.592364 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.608712 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.619215 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.630727 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.646864 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.646894 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.646902 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.646916 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.646925 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.651081 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z 
is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.663918 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.676868 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.690069 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.703864 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.719422 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.721559 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" event={"ID":"31ed2ad2-a571-44ac-9f18-afd71427fd7a","Type":"ContainerStarted","Data":"e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.725527 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.726171 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.726233 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.734451 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.748652 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.748770 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.748850 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.748929 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.749000 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.758368 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.769608 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.772588 4675 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.773321 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.782318 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.800176 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a
0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.816419 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.829604 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.842075 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.851638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.851838 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.851946 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.852026 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.852105 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.857651 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.869214 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa0
0ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.884083 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.898043 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.907361 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.919729 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.929892 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.940948 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.952846 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.954471 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.954524 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.954534 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.954549 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.954560 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:05Z","lastTransitionTime":"2025-11-25T12:28:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.964581 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.976307 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.984674 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:05 crc kubenswrapper[4675]: I1125 12:28:05.995071 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.011554 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.025868 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.036372 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.052547 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountP
ath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.059362 4675 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.059411 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.059422 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.059439 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.059452 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.067973 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.084758 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.095478 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092f
b63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.108793 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.119784 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.129408 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:06Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.162506 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.162547 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.162559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.162575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.162585 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.264439 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.264464 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.264472 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.264485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.264493 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.367364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.367391 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.367399 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.367411 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.367420 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.470334 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.470370 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.470383 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.470396 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.470405 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.572232 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.572276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.572287 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.572304 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.572317 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.675053 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.675095 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.675106 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.675126 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.675136 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.728000 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.777636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.777679 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.777692 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.777709 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.777720 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.879385 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.879456 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.879468 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.879486 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.879498 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.981985 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.982060 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.982071 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.982086 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:06 crc kubenswrapper[4675]: I1125 12:28:06.982095 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:06Z","lastTransitionTime":"2025-11-25T12:28:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.085310 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.085354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.085364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.085385 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.085397 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.188138 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.188397 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.188491 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.188586 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.188663 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.291034 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.291106 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.291128 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.291163 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.291185 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.393855 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.393903 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.393916 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.393935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.393944 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.496241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.496322 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.496347 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.496375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.496396 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.531582 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.531735 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:07 crc kubenswrapper[4675]: E1125 12:28:07.531959 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.532053 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:07 crc kubenswrapper[4675]: E1125 12:28:07.532399 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:07 crc kubenswrapper[4675]: E1125 12:28:07.532464 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.598531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.598568 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.598576 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.598590 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.598598 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.602899 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl"] Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.603248 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.605244 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.605999 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.615571 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.626264 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.640044 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.654291 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.664758 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.677666 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.690369 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.691743 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fz4t\" (UniqueName: \"kubernetes.io/projected/e52af8a1-50bf-41c1-8661-136814faf6c6-kube-api-access-5fz4t\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.691809 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e52af8a1-50bf-41c1-8661-136814faf6c6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.691893 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.691947 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.700735 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.700779 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.700794 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.700835 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.700852 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.703851 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.715898 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.730432 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.730696 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.743800 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.756448 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.775526 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.787777 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.793132 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.793267 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.793366 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fz4t\" (UniqueName: \"kubernetes.io/projected/e52af8a1-50bf-41c1-8661-136814faf6c6-kube-api-access-5fz4t\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.793437 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e52af8a1-50bf-41c1-8661-136814faf6c6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.793986 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.794015 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e52af8a1-50bf-41c1-8661-136814faf6c6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.798380 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e52af8a1-50bf-41c1-8661-136814faf6c6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.803605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.803798 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.803924 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.804015 4675 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.804093 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.805319 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.811221 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fz4t\" (UniqueName: \"kubernetes.io/projected/e52af8a1-50bf-41c1-8661-136814faf6c6-kube-api-access-5fz4t\") pod \"ovnkube-control-plane-749d76644c-cw6wl\" (UID: \"e52af8a1-50bf-41c1-8661-136814faf6c6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.824285 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a
0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:07Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.907354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.907416 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.907426 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.907439 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.907449 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:07Z","lastTransitionTime":"2025-11-25T12:28:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:07 crc kubenswrapper[4675]: I1125 12:28:07.916730 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" Nov 25 12:28:07 crc kubenswrapper[4675]: W1125 12:28:07.929005 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode52af8a1_50bf_41c1_8661_136814faf6c6.slice/crio-6bd8700d63085cb879ef131e49a6b33c6215348218294435c366762ac9e029d1 WatchSource:0}: Error finding container 6bd8700d63085cb879ef131e49a6b33c6215348218294435c366762ac9e029d1: Status 404 returned error can't find the container with id 6bd8700d63085cb879ef131e49a6b33c6215348218294435c366762ac9e029d1 Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.009741 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.009769 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.009778 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.009791 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.009822 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.111549 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.111577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.111585 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.111598 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.111607 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.213313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.213354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.213365 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.213382 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.213396 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.315471 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.315518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.315531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.315548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.315561 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.418152 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.418202 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.418216 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.418234 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.418256 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.520776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.520810 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.520849 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.520865 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.520877 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.623318 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.623382 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.623404 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.623432 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.623454 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.725756 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.725793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.725803 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.725842 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.725854 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.735602 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" event={"ID":"e52af8a1-50bf-41c1-8661-136814faf6c6","Type":"ContainerStarted","Data":"6bd8700d63085cb879ef131e49a6b33c6215348218294435c366762ac9e029d1"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.828704 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.828960 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.828969 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.828982 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.828990 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.931911 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.931971 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.931984 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.932001 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:08 crc kubenswrapper[4675]: I1125 12:28:08.932013 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:08Z","lastTransitionTime":"2025-11-25T12:28:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.034457 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.034491 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.034502 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.034525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.034539 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.136548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.136584 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.136593 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.136607 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.136616 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.240096 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.240145 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.240159 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.240177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.240190 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.342147 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.342191 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.342202 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.342220 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.342231 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.444465 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.444490 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.444499 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.444511 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.444520 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.510995 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-whffq"] Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.511394 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.511451 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.526169 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.531667 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.531765 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.531788 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.531959 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.531667 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.532216 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.541771 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.546054 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.546241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.546383 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.546476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.546574 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.554218 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.563426 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.573712 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.598661 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a
0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.629871 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.640555 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.649367 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.649559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.649615 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.649672 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.649724 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.657094 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.669600 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.684133 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.701157 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.705792 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.706028 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2r78\" (UniqueName: \"kubernetes.io/projected/134942f4-79a7-4b14-9f21-ae027d146b44-kube-api-access-q2r78\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.714493 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.728358 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.740527 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" event={"ID":"e52af8a1-50bf-41c1-8661-136814faf6c6","Type":"ContainerStarted","Data":"bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe"} Nov 25 12:28:09 crc kubenswrapper[4675]: 
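The payload of each failed patch is a Kubernetes strategic merge patch, which is why it opens with a $setElementOrder/conditions directive: that key pins the ordering of the conditions array so the apiserver can merge only the elements actually sent. A minimal sketch of assembling such a document with plain maps (the UID below is copied from the network-metrics-daemon-whffq entries; using generic maps rather than the kubelet's typed structs is a simplification for illustration):

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

func main() {
    // Each entry in $setElementOrder/conditions names a condition type;
    // the conditions list then carries only the fields being updated.
    order := []map[string]string{
        {"type": "PodReadyToStartContainers"},
        {"type": "Initialized"},
        {"type": "Ready"},
        {"type": "ContainersReady"},
        {"type": "PodScheduled"},
    }
    patch := map[string]any{
        "metadata": map[string]any{"uid": "134942f4-79a7-4b14-9f21-ae027d146b44"},
        "status": map[string]any{
            "$setElementOrder/conditions": order,
            "conditions": []map[string]any{
                {"lastTransitionTime": "2025-11-25T12:27:56Z", "status": "True", "type": "Ready"},
            },
        },
    }
    out, err := json.Marshal(patch)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out))
}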
I1125 12:28:09.747577 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.751631 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.751657 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.751667 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.751679 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.751688 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.759193 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.769372 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:09Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.807073 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.807133 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2r78\" (UniqueName: \"kubernetes.io/projected/134942f4-79a7-4b14-9f21-ae027d146b44-kube-api-access-q2r78\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.807236 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:09 crc kubenswrapper[4675]: E1125 12:28:09.807310 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:10.307292506 +0000 UTC m=+35.478884847 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.823151 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.823284 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.831662 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2r78\" (UniqueName: \"kubernetes.io/projected/134942f4-79a7-4b14-9f21-ae027d146b44-kube-api-access-q2r78\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.835198 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" probeResult="failure" output="" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.847064 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" probeResult="failure" output="" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.853297 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.853318 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.853327 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.853340 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.853349 4675 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.955520 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.955550 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.955560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.955575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:09 crc kubenswrapper[4675]: I1125 12:28:09.955586 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:09Z","lastTransitionTime":"2025-11-25T12:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.057944 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.057980 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.057991 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.058007 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.058020 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.160298 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.160323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.160331 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.160344 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.160352 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.263266 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.263313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.263328 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.263347 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.263362 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.311348 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:10 crc kubenswrapper[4675]: E1125 12:28:10.311495 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:10 crc kubenswrapper[4675]: E1125 12:28:10.311547 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:11.311532468 +0000 UTC m=+36.483124809 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.366485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.366725 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.366737 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.366755 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.366767 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.469880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.469921 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.469931 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.469946 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.469956 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.573388 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.573436 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.573449 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.573478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.573490 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.676332 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.676377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.676389 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.676426 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.676440 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
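The condition's message names the actual blocker: the container runtime keeps reporting NetworkReady=false because nothing has yet written a CNI configuration into /etc/kubernetes/cni/net.d/, and it clears once the ovn-kubernetes pods come up and drop their config there. A minimal sketch of the same existence check, assuming the directory from the log and the conventional .conf/.conflist/.json suffixes (the suffix list is an assumption, not something this log states):

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

func main() {
    dir := "/etc/kubernetes/cni/net.d"
    entries, err := os.ReadDir(dir)
    if err != nil {
        log.Fatalf("cannot read %s: %v", dir, err)
    }
    found := false
    for _, e := range entries {
        if e.IsDir() {
            continue
        }
        switch filepath.Ext(e.Name()) {
        case ".conf", ".conflist", ".json":
            fmt.Println("CNI config candidate:", filepath.Join(dir, e.Name()))
            found = true
        }
    }
    if !found {
        fmt.Println("no CNI configuration file in", dir,
            "- the runtime reports NetworkReady=false until one appears")
    }
}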
Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.745320 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" event={"ID":"e52af8a1-50bf-41c1-8661-136814faf6c6","Type":"ContainerStarted","Data":"cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.746888 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/0.log" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.750011 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a" exitCode=1 Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.750050 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.750619 4675 scope.go:117] "RemoveContainer" containerID="e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.758151 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.778743 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.778780 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.778791 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.778806 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.778944 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
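Every webhook rejection closes with the same clock comparison, e.g. current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z. Both stamps are RFC 3339, so putting a number on how stale the certificate is takes only a few lines; this sketch reproduces the comparison and prints the gap (just over 92 days):

package main

import (
    "fmt"
    "log"
    "time"
)

func main() {
    // Timestamps copied verbatim from the webhook error messages.
    now, err := time.Parse(time.RFC3339, "2025-11-25T12:28:10Z")
    if err != nil {
        log.Fatal(err)
    }
    notAfter, err := time.Parse(time.RFC3339, "2025-08-24T17:21:41Z")
    if err != nil {
        log.Fatal(err)
    }
    if now.After(notAfter) {
        fmt.Printf("certificate expired %s ago\n", now.Sub(notAfter).Round(time.Second))
    }
}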
Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.781283 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.792503 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
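The lastState block above also explains why kube-apiserver-check-endpoints shows restartCount 1: its first run started before the local apiserver was listening, so its Get against https://localhost:6443 failed with connection refused and the process exited (exit code 255); the replacement has been running since the port came up. A minimal sketch of that reachability distinction, assuming a bare TCP dial is enough to tell refused from listening:

package main

import (
    "fmt"
    "net"
    "time"
)

func main() {
    // Before kube-apiserver binds :6443, this dial fails with
    // "connection refused", matching the termination message in the log.
    conn, err := net.DialTimeout("tcp", "localhost:6443", 2*time.Second)
    if err != nil {
        fmt.Println("apiserver not reachable yet:", err)
        return
    }
    conn.Close()
    fmt.Println("apiserver port is accepting connections")
}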
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.805340 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.826376 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19
b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.846388 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1
c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a042
0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.857414 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.870282 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.881084 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.881341 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.881349 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.881362 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.881372 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.884116 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.893840 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.905691 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.919098 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.932224 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092f
b63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.944581 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.954624 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.964862 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.973950 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.983602 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.983629 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.983638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.983650 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.983659 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:10Z","lastTransitionTime":"2025-11-25T12:28:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:10 crc kubenswrapper[4675]: I1125 12:28:10.990571 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.000599 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:10Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.012768 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.034035 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"message\\\":\\\"rs/externalversions/factory.go:140\\\\nI1125 12:28:10.000555 5915 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001115 5915 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001319 5915 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:10.001696 5915 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 12:28:10.001727 5915 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 12:28:10.001739 5915 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:10.001746 5915 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 12:28:10.001778 5915 factory.go:656] Stopping watch factory\\\\nI1125 12:28:10.001798 5915 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 12:28:10.001799 5915 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 12:28:10.001807 5915 handler.go:208] Removed *v1.EgressIP 
event handler 8\\\\nI1125 12:28:10.001854 5915 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.045696 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"qua
y.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.065705 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.079993 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.085605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.085630 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.085638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.085651 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.085662 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.093538 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.105414 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.119103 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.131237 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.142986 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.156112 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.168295 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.183460 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.187105 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.187139 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.187148 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.187161 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.187170 4675 setters.go:603] "Node became not ready" node="crc" 
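Note on the recurring patch failures above: every pod status update is rejected by the pod.network-node-identity.openshift.io webhook because its serving certificate expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2025-11-25T12:28:11Z, so each TLS handshake to https://127.0.0.1:9743 fails Go's standard x509 validity-window check. A minimal offline reproduction of that check, assuming the certificate sits at /etc/webhook-cert/tls.crt (the log only shows the /etc/webhook-cert/ mount point; the tls.crt file name is the usual kubernetes.io/tls secret key, not something this log confirms):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path: derived from the webhook container's
        // /etc/webhook-cert/ volume mount shown in the records above.
        data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
        if err != nil {
            log.Fatal(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            log.Fatal("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        now := time.Now()
        // The same window check that makes the handshake fail with
        // "x509: certificate has expired or is not yet valid".
        if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
            fmt.Printf("invalid: current time %s is outside [%s, %s]\n",
                now.UTC().Format(time.RFC3339),
                cert.NotBefore.UTC().Format(time.RFC3339),
                cert.NotAfter.UTC().Format(time.RFC3339))
            return
        }
        fmt.Println("certificate is within its validity window")
    }

Run against this node's webhook cert, the sketch would report a window ending 2025-08-24T17:21:41Z, matching every failure above; renewing the certificate (or correcting the clock, if the skew is on the node side) is the precondition for any of these status patches to succeed.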
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.194101 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.205062 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.289414 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.289451 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.289463 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.289478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.289489 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321161 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321266 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321296 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321321 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321346 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:28:27.321320423 +0000 UTC m=+52.492912764 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321395 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321460 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.321465 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321479 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321491 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321544 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:27.32152822 +0000 UTC m=+52.493120641 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321584 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321598 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321608 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321644 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:27.321632833 +0000 UTC m=+52.493225174 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321686 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321707 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:13.321700315 +0000 UTC m=+38.493292656 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321741 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321759 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
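The kube-api-access-* mounts failing above are projected volumes: one bound service-account token plus the kube-root-ca.crt configmap and, on OpenShift, openshift-service-ca.crt, all rendered into a single directory. That composition is why two unregistered configmaps are enough to block the mounts for both diagnostics pods. A sketch of the standard shape using the corev1 types (the volume name is taken from the failing mount above; paths and the expiry value are the conventional defaults, not values this log confirms):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        expiry := int64(3607) // conventional default, not read from this log
        vol := corev1.Volume{
            Name: "kube-api-access-cqllr",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{
                        // Bound service-account token.
                        {ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
                            ExpirationSeconds: &expiry,
                            Path:              "token",
                        }},
                        // Cluster CA bundle; "not registered" above means the
                        // kubelet's object cache has not seen this configmap yet.
                        {ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
                            Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
                        }},
                        // OpenShift service CA, the second missing object above.
                        {ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
                            Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
                        }},
                    },
                },
            },
        }
        fmt.Printf("%s projects %d sources; SetUp fails until every one resolves\n",
            vol.Name, len(vol.VolumeSource.Projected.Sources))
    }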
No retries permitted until 2025-11-25 12:28:27.321754147 +0000 UTC m=+52.493346488 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321826 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.321846 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:27.321840599 +0000 UTC m=+52.493432940 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.392580 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.392623 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.392632 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.392649 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.392659 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.494478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.494516 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.494527 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.494543 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.494556 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.532209 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.532231 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.532273 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.532209 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.532352 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.532480 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.532530 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:11 crc kubenswrapper[4675]: E1125 12:28:11.532611 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.596440 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.596476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.596485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.596498 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.596507 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.698192 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.698248 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.698263 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.698287 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.698302 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.754096 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/0.log" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.757109 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.757546 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.772138 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.783979 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.797451 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.800446 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.800489 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.800501 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.800518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.800530 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.811940 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.825665 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa0
0ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.842019 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.854579 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.866123 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.877094 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.888319 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.902835 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.902869 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.902881 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.902899 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.902912 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:11Z","lastTransitionTime":"2025-11-25T12:28:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.904167 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.916871 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.932558 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.951601 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.966114 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:11 crc kubenswrapper[4675]: I1125 12:28:11.981449 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:11Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.006074 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.006313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.006411 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.006518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.006615 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.008037 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"message\\\":\\\"rs/externalversions/factory.go:140\\\\nI1125 12:28:10.000555 5915 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001115 5915 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001319 5915 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:10.001696 5915 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 12:28:10.001727 5915 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 12:28:10.001739 5915 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:10.001746 5915 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 12:28:10.001778 5915 factory.go:656] Stopping watch factory\\\\nI1125 12:28:10.001798 5915 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 12:28:10.001799 5915 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 12:28:10.001807 5915 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 12:28:10.001854 5915 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.109383 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.109699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.109880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.110029 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.110164 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.212742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.212776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.212784 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.212799 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.212807 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.314502 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.314544 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.314556 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.314574 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.314586 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.416374 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.416406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.416418 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.416434 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.416445 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.520406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.520465 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.520480 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.520501 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.520513 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.623463 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.623535 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.623547 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.623568 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.623581 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.726194 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.726241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.726253 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.726271 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.726284 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.762902 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/1.log" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.763843 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/0.log" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.766827 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873" exitCode=1 Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.766921 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.767026 4675 scope.go:117] "RemoveContainer" containerID="e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.767520 4675 scope.go:117] "RemoveContainer" containerID="945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873" Nov 25 12:28:12 crc kubenswrapper[4675]: E1125 12:28:12.767698 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.800022 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.810597 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.822871 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.828223 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.828250 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.828259 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.828272 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.828283 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.841952 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c6bb1c97157967a2e28133f3b39640bc55d16a0369cebc8cc2b408dc7d021a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"message\\\":\\\"rs/externalversions/factory.go:140\\\\nI1125 12:28:10.000555 5915 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001115 5915 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:10.001319 5915 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:10.001696 5915 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 12:28:10.001727 5915 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 12:28:10.001739 5915 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:10.001746 5915 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 12:28:10.001778 5915 factory.go:656] Stopping watch factory\\\\nI1125 12:28:10.001798 5915 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 12:28:10.001799 5915 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 12:28:10.001807 5915 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 12:28:10.001854 5915 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.854312 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.867977 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.879578 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092f
b63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.892513 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.903274 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.913392 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.922489 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.930848 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.930888 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.930903 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.930925 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.930941 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:12Z","lastTransitionTime":"2025-11-25T12:28:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.931756 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.943763 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.952703 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.965253 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.975842 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:12 crc kubenswrapper[4675]: I1125 12:28:12.986447 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:12Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.033145 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.033185 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.033195 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.033309 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.033324 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.135413 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.135448 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.135458 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.135470 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.135480 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.237947 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.237993 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.238006 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.238023 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.238038 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.339113 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.339407 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.339465 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:17.339448188 +0000 UTC m=+42.511040539 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.340664 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.340721 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.340733 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.340749 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.340762 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.446667 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.446742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.446772 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.446795 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.446833 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.531384 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.531443 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.531493 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.531396 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.531596 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.531657 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.531732 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.531784 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.548885 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.548923 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.548935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.548958 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.548969 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.650912 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.650951 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.650960 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.650983 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.651001 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.753699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.753733 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.753742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.753757 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.753771 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.772297 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/1.log" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.777340 4675 scope.go:117] "RemoveContainer" containerID="945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873" Nov 25 12:28:13 crc kubenswrapper[4675]: E1125 12:28:13.777773 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.790545 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.800866 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.812446 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.831549 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.842918 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856058 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856302 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856389 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856472 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856489 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.856556 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.875475 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.888368 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.902669 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.915803 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.931939 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.945307 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.958595 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.959853 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.959880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.959890 4675 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.959904 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.959913 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:13Z","lastTransitionTime":"2025-11-25T12:28:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.970228 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.979944 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:13 crc kubenswrapper[4675]: I1125 12:28:13.990684 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.000083 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:13Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.062461 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.062533 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.062545 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.062583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.062597 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.165433 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.165505 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.165517 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.165556 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.165569 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.267035 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.267097 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.267108 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.267123 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.267132 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.369334 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.369388 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.369400 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.369414 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.369423 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.472124 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.472165 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.472177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.472194 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.472206 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.575128 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.575177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.575189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.575205 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.575217 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.677519 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.677572 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.677584 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.677603 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.677618 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.779477 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.779530 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.779545 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.779560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.779569 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.837482 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.837539 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.837554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.837584 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.837670 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.850687 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:14Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.854181 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.854215 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.854225 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.854240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.854251 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.866927 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:14Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.869868 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.869926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.869938 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.869953 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.869965 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.882430 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:14Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.885559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.885585 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.885596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.885612 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.885627 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.898015 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:14Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.902490 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.902538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.902553 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.902574 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.902586 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.916742 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:14Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:14 crc kubenswrapper[4675]: E1125 12:28:14.916941 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.918373 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.918414 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.918425 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.918440 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:14 crc kubenswrapper[4675]: I1125 12:28:14.918451 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:14Z","lastTransitionTime":"2025-11-25T12:28:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.021070 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.021106 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.021116 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.021131 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.021141 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.123531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.123590 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.123604 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.123625 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.123638 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.225793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.225855 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.225869 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.225887 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.225899 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.327887 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.327915 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.327923 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.327936 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.327944 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.430473 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.430500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.430509 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.430522 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.430530 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.531736 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:15 crc kubenswrapper[4675]: E1125 12:28:15.531933 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.532285 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:15 crc kubenswrapper[4675]: E1125 12:28:15.532381 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.532406 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:15 crc kubenswrapper[4675]: E1125 12:28:15.532483 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.532526 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:15 crc kubenswrapper[4675]: E1125 12:28:15.532576 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.533163 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.533214 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.533225 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.533240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.533251 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.544918 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.554254 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.569041 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.579766 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.591255 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.609156 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.623941 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.635338 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.635375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.635385 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.635402 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.635413 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.636155 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.655872 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.669362 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.679934 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.689900 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.704033 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.714003 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.726444 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.735605 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.738583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.738627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.738637 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.738671 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.738682 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.745324 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:15Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.840541 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.840603 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.840618 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.840633 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:15 crc kubenswrapper[4675]: I1125 12:28:15.840644 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:15Z","lastTransitionTime":"2025-11-25T12:28:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.388108 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.388311 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.388378 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:25.388357665 +0000 UTC m=+50.559950006 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.486875 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.486906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.486917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.487127 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.487138 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:17Z","lastTransitionTime":"2025-11-25T12:28:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.532193 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.532219 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.532350 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.532426 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.532464 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.532530 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.532218 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:17 crc kubenswrapper[4675]: E1125 12:28:17.532934 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.590491 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.590528 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.590536 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.590550 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:17 crc kubenswrapper[4675]: I1125 12:28:17.590558 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:17Z","lastTransitionTime":"2025-11-25T12:28:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.531945 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.531987 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.531944 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.532065 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:19 crc kubenswrapper[4675]: E1125 12:28:19.532182 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:19 crc kubenswrapper[4675]: E1125 12:28:19.532358 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:19 crc kubenswrapper[4675]: E1125 12:28:19.532587 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:19 crc kubenswrapper[4675]: E1125 12:28:19.532656 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.541974 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.542002 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.542010 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.542024 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.542032 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:19Z","lastTransitionTime":"2025-11-25T12:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.645184 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.645220 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.645230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.645244 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.645254 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:19Z","lastTransitionTime":"2025-11-25T12:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.747179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.747221 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.747236 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.747256 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.747271 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:19Z","lastTransitionTime":"2025-11-25T12:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.850285 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.850412 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.850426 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.850442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.850453 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:19Z","lastTransitionTime":"2025-11-25T12:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.953276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.953323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.953331 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.953346 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:19 crc kubenswrapper[4675]: I1125 12:28:19.953355 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:19Z","lastTransitionTime":"2025-11-25T12:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.056275 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.056361 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.056375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.056394 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.056405 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.158448 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.158501 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.158515 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.158535 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.158550 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.261670 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.261713 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.261724 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.261741 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.261752 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.364004 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.364088 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.364101 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.364117 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.364129 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.466729 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.466776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.466788 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.466806 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.466839 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.570188 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.570222 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.570241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.570258 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.570268 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.672892 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.672953 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.673046 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.673078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.673095 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.775262 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.775303 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.775313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.775330 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.775341 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.877503 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.877560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.877576 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.877602 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.877619 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.980575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.980631 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.980665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.980693 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:20 crc kubenswrapper[4675]: I1125 12:28:20.980712 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:20Z","lastTransitionTime":"2025-11-25T12:28:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.084115 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.084173 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.084189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.084212 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.084224 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.186891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.186926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.186934 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.186947 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.186958 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.289538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.289587 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.289605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.289626 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.289641 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.392319 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.392354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.392365 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.392381 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.392394 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.495043 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.495080 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.495096 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.495116 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.495130 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.531919 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.532004 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:21 crc kubenswrapper[4675]: E1125 12:28:21.532046 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.532088 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.532009 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:21 crc kubenswrapper[4675]: E1125 12:28:21.532254 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:21 crc kubenswrapper[4675]: E1125 12:28:21.532450 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:21 crc kubenswrapper[4675]: E1125 12:28:21.532590 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.598059 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.598114 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.598130 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.598154 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.598173 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.702790 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.702883 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.702906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.702935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.702959 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.805955 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.806023 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.806050 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.806096 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.806120 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.909525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.909567 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.909579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.909596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:21 crc kubenswrapper[4675]: I1125 12:28:21.909607 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:21Z","lastTransitionTime":"2025-11-25T12:28:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.011976 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.012013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.012024 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.012039 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.012049 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.114634 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.114666 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.114676 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.114692 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.114703 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.218058 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.218112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.218129 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.218148 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.218160 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.320407 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.320448 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.320459 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.320474 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.320486 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.422983 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.423028 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.423040 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.423057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.423070 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.527427 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.527467 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.527479 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.527498 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.527511 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.630216 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.630254 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.630265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.630284 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.630297 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.732468 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.732939 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.733201 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.733530 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.733742 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.836386 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.836432 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.836447 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.836468 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.836481 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.939536 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.939607 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.939627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.939656 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:22 crc kubenswrapper[4675]: I1125 12:28:22.939675 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:22Z","lastTransitionTime":"2025-11-25T12:28:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.042731 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.042761 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.042769 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.042783 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.042792 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.144948 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.144987 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.144997 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.145013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.145024 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.248158 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.248203 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.248216 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.248237 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.248252 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.350945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.350980 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.350990 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.351004 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.351016 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.453093 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.453154 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.453176 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.453208 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.453229 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.531702 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:23 crc kubenswrapper[4675]: E1125 12:28:23.531868 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.532277 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.532429 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:23 crc kubenswrapper[4675]: E1125 12:28:23.532515 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.532541 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:23 crc kubenswrapper[4675]: E1125 12:28:23.532643 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:23 crc kubenswrapper[4675]: E1125 12:28:23.532743 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.556464 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.556518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.556531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.556555 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.556568 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.658967 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.659004 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.659013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.659064 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.659075 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.762467 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.762556 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.762586 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.762617 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.762641 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.865183 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.865234 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.865246 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.865272 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.865286 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.967403 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.967442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.967454 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.967469 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:23 crc kubenswrapper[4675]: I1125 12:28:23.967479 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:23Z","lastTransitionTime":"2025-11-25T12:28:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.069836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.069867 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.069875 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.069888 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.069899 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.173640 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.173716 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.173729 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.173747 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.173759 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.276878 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.276918 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.276930 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.276945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.276957 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.379621 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.379859 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.379880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.379894 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.379903 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.482306 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.482351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.482363 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.482381 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.482393 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.584297 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.584351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.584363 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.584378 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.584390 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.686781 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.686881 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.686893 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.686924 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.686934 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.789358 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.789402 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.789418 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.789436 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.789450 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.892746 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.892789 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.892797 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.892837 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.892850 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.995720 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.995760 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.995776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.995796 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:24 crc kubenswrapper[4675]: I1125 12:28:24.995833 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:24Z","lastTransitionTime":"2025-11-25T12:28:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.064758 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.064801 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.064826 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.064844 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.064857 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.076367 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.079455 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.079489 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.079500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.079516 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.079526 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.090700 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.094442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.094480 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.094491 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.094525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.094539 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.106934 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.110477 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.110514 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.110523 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.110537 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.110546 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.122140 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.124834 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.124867 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.124876 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.124891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.124902 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.135984 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.136107 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.137636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.137673 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.137685 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.137699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.137710 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.240235 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.240270 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.240281 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.240296 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.240308 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.342481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.342516 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.342524 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.342542 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.342551 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.446062 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.446120 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.446136 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.446156 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.446171 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.485216 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.485424 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.485529 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:41.485508455 +0000 UTC m=+66.657100856 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.532132 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.532224 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.532140 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.532412 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.532468 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.532518 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.532593 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:25 crc kubenswrapper[4675]: E1125 12:28:25.532703 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.549032 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.549074 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.549085 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.549102 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.549114 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.553945 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.566543 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.579934 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.593239 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.606072 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.621643 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.633644 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.645934 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.651264 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.651342 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.651353 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.651367 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.651376 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.657673 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.673592 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.685196 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.697900 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.714929 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.727891 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.742723 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.754409 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.754472 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.754488 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.754512 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.754530 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.763994 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.790683 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d54
9fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.857776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.858140 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc 
kubenswrapper[4675]: I1125 12:28:25.858333 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.858506 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.858973 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.961284 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.961323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.961334 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.961350 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:25 crc kubenswrapper[4675]: I1125 12:28:25.961371 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:25Z","lastTransitionTime":"2025-11-25T12:28:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.064493 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.064540 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.064556 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.064578 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.064595 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.167253 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.167303 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.167391 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.167412 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.167424 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.270066 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.270181 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.270213 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.270230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.270239 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.373061 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.373086 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.373095 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.373107 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.373115 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.476431 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.476481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.476495 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.476514 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.476525 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.579655 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.579695 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.579704 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.579720 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.579729 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.682627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.682690 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.682699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.682729 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.682742 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.785807 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.785921 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.785963 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.785998 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.786021 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.888636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.888699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.888714 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.888735 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.888749 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.992323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.992368 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.992380 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.992399 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:26 crc kubenswrapper[4675]: I1125 12:28:26.992413 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:26Z","lastTransitionTime":"2025-11-25T12:28:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.095883 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.095926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.095935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.095951 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.095962 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.168977 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.186392 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.189470 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.199549 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.199592 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.199603 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.199619 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.199630 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.203791 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.221977 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.234614 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.245574 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.262488 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9ca
fd26be4dd41f46407e02c873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.279631 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d54
9fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.293146 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.302064 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.302115 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.302130 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.302152 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.302168 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.306449 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.318937 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.329525 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.340621 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.353893 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.368430 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.382898 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.395030 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.404928 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.404964 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.404978 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405019 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405031 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405182 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405285 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405327 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405361 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405405 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:28:59.405379348 +0000 UTC m=+84.576971689 (durationBeforeRetry 32s).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405364 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405431 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:59.405422899 +0000 UTC m=+84.577015240 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405463 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405486 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405485 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405500 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405539 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:59.405521782 +0000 UTC m=+84.577114193 (durationBeforeRetry 32s).
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405556 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:59.405548913 +0000 UTC m=+84.577141374 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.405576 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405647 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405662 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405670 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.405723 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:28:59.405713289 +0000 UTC m=+84.577305690 (durationBeforeRetry 32s).
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.407164 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.507601 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.507647 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.507658 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.507677 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.507690 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.532265 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.532543 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.532645 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.532662 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.532726 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.532841 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.532917 4675 scope.go:117] "RemoveContainer" containerID="945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873" Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.533006 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:27 crc kubenswrapper[4675]: E1125 12:28:27.533146 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.609591 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.609632 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.609642 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.609656 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.609666 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.711660 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.711699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.711723 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.711742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.711753 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.814640 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.814689 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.814699 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.814715 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.814725 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.831207 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/1.log" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.834007 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.834564 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.852489 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.867466 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.881843 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.896926 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.916724 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.916770 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.916782 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.916798 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.916809 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:27Z","lastTransitionTime":"2025-11-25T12:28:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.920685 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.939394 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa0
0ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.957927 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd7
89a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.972493 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.981379 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.991309 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:27 crc kubenswrapper[4675]: I1125 12:28:27.999521 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.010772 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.018850 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.018878 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.018886 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.018900 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.018911 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.021165 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.032063 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.051214 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d
73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.068327 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.079501 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.091753 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.121443 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.121487 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.121498 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.121518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.121529 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.224087 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.224377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.224388 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.224401 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.224409 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.326878 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.326952 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.326962 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.326979 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.326989 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.429753 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.429793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.429804 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.429833 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.429866 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.532056 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.532085 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.532093 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.532109 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.532127 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.634157 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.634191 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.634200 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.634215 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.634223 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.736809 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.736881 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.736896 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.736914 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.736928 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839508 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839565 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839580 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839602 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839619 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.839864 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/2.log" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.840638 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/1.log" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.843598 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3" exitCode=1 Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.843660 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.843713 4675 scope.go:117] "RemoveContainer" containerID="945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.844481 4675 scope.go:117] "RemoveContainer" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3" Nov 25 12:28:28 crc kubenswrapper[4675]: E1125 12:28:28.844701 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.878747 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.902800 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.915282 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092f
b63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.925908 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.936851 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.941772 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.942076 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.942284 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.942480 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.942707 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:28Z","lastTransitionTime":"2025-11-25T12:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.950231 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.961803 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.974232 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.985162 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:28 crc kubenswrapper[4675]: I1125 12:28:28.998260 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.006917 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.018135 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.027132 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.038225 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.047462 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.047689 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.047767 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.047863 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.047938 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.058792 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.070948 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.082454 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.099234 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://945aaed4c5f42e40bd84fc7436d85b58cebfc9cafd26be4dd41f46407e02c873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:11Z\\\",\\\"message\\\":\\\" lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"83c1e277-3d22-42ae-a355-f7a0ff0bd171\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-image-registry/image-registry_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.93\\\\\\\", Port:5000, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, 
Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1125 12:28:11.707745 6118 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.149905 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.149944 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.149954 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.149967 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.149977 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.252587 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.252623 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.252633 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.252648 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.252659 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.355795 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.355953 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.355973 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.356002 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.356017 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.458289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.458321 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.458329 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.458341 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.458350 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.531695 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.531724 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.531739 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.531704 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:29 crc kubenswrapper[4675]: E1125 12:28:29.531845 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:29 crc kubenswrapper[4675]: E1125 12:28:29.531919 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:29 crc kubenswrapper[4675]: E1125 12:28:29.531994 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:29 crc kubenswrapper[4675]: E1125 12:28:29.532055 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.560364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.560408 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.560420 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.560438 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.560450 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.662607 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.662683 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.662727 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.662782 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.662798 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.765457 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.765505 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.765518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.765536 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.765547 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.847997 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/2.log" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.850975 4675 scope.go:117] "RemoveContainer" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3" Nov 25 12:28:29 crc kubenswrapper[4675]: E1125 12:28:29.851203 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868453 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868479 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868492 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868515 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868524 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.868500 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.883569 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.896065 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.907451 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.917584 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.933644 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.946984 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.959365 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972511 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972520 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972537 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972546 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:29Z","lastTransitionTime":"2025-11-25T12:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.972837 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.986183 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:29 crc kubenswrapper[4675]: I1125 12:28:29.998109 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:29Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.011791 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.021564 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.033253 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.052377 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.065757 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.074599 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.074655 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.074665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.074677 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.074686 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.079371 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.097957 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d
73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.176694 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.176730 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.176738 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.176752 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.176761 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.278372 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.278417 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.278428 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.278446 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.278456 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.381343 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.381400 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.381416 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.381443 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.381459 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.484039 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.484071 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.484079 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.484094 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.484109 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.585840 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.585880 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.585891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.585906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.585919 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.688605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.688648 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.688662 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.688682 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.688696 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.790981 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.791029 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.791041 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.791058 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.791071 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.892748 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.892787 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.892796 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.892809 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.892837 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.995020 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.995051 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.995059 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.995075 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:30 crc kubenswrapper[4675]: I1125 12:28:30.995084 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:30Z","lastTransitionTime":"2025-11-25T12:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.097690 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.097741 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.097753 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.097769 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.097781 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.200132 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.200163 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.200171 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.200183 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.200193 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.303013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.303080 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.303098 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.303115 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.303127 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.404867 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.404901 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.404911 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.404927 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.404936 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.507167 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.507745 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.507763 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.507779 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.507789 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.531987 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:31 crc kubenswrapper[4675]: E1125 12:28:31.532217 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.532271 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.532322 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:31 crc kubenswrapper[4675]: E1125 12:28:31.532431 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.532325 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:31 crc kubenswrapper[4675]: E1125 12:28:31.532703 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:31 crc kubenswrapper[4675]: E1125 12:28:31.532807 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.609505 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.609564 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.609579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.609596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.609608 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.711892 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.711940 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.711951 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.711969 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.711981 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.818135 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.818204 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.818221 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.818243 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.818259 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.921006 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.921076 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.921089 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.921104 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:31 crc kubenswrapper[4675]: I1125 12:28:31.921152 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:31Z","lastTransitionTime":"2025-11-25T12:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.023891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.023924 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.023932 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.023945 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.023954 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.126726 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.126759 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.126767 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.126780 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.126789 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.229147 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.229206 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.229218 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.229253 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.229268 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.331511 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.331548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.331559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.331574 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.331584 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.433739 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.433792 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.433804 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.433836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.433849 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.536790 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.536843 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.536871 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.536886 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.536897 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.639791 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.639868 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.639884 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.639907 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.639923 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.742132 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.742160 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.742168 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.742181 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.742188 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.844628 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.844665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.844675 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.844689 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.844703 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.946400 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.946442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.946450 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.946469 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:32 crc kubenswrapper[4675]: I1125 12:28:32.946478 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:32Z","lastTransitionTime":"2025-11-25T12:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.048861 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.048895 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.048904 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.048917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.048926 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.151440 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.151476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.151485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.151500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.151510 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.254124 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.254174 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.254186 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.254202 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.254213 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.356354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.356392 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.356400 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.356414 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.356423 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.459050 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.459089 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.459100 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.459115 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.459124 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.532191 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.532238 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:33 crc kubenswrapper[4675]: E1125 12:28:33.532330 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.532364 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.532191 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:33 crc kubenswrapper[4675]: E1125 12:28:33.532548 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:33 crc kubenswrapper[4675]: E1125 12:28:33.532641 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:33 crc kubenswrapper[4675]: E1125 12:28:33.532717 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.561077 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.561113 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.561123 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.561139 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.561153 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.663526 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.663649 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.663662 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.663679 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.663690 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.765608 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.765643 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.765652 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.765667 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.765676 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.867955 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.867984 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.867994 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.868008 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.868020 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.970453 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.970497 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.970511 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.970528 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:33 crc kubenswrapper[4675]: I1125 12:28:33.970540 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:33Z","lastTransitionTime":"2025-11-25T12:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.072663 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.072697 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.072725 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.072739 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.072748 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.175388 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.175459 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.175472 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.175502 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.175520 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.278243 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.278295 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.278310 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.278324 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.278332 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.381686 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.381730 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.381742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.381762 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.381773 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.486576 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.486654 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.486676 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.486702 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.486976 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.590267 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.590306 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.590313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.590329 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.590340 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.693111 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.693158 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.693171 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.693193 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.693206 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.795249 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.795290 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.795301 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.795318 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.795331 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.897583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.897624 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.897631 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.897647 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:34 crc kubenswrapper[4675]: I1125 12:28:34.897656 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:34Z","lastTransitionTime":"2025-11-25T12:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.000177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.000238 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.000251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.000266 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.000277 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.102725 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.102774 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.102789 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.102809 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.102858 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.204949 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.205001 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.205076 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.205097 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.205109 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.296375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.296414 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.296424 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.296440 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.296453 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.308723 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.312776 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.312834 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.312846 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.312864 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.312878 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.327005 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.330913 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.330960 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.330973 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.330991 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.331002 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.342974 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.346338 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.346369 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.346378 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.346392 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.346401 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.358109 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.362194 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.362240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.362254 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.362271 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.362283 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.375193 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.375359 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.377666 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.377711 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.377720 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.377735 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.377747 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.480083 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.480168 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.480185 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.480204 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.480218 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.534029 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.534237 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.534356 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.534390 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.534448 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.534483 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.534583 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:35 crc kubenswrapper[4675]: E1125 12:28:35.534663 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.545748 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.555419 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.568069 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.582151 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.583970 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.584012 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.584024 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.584045 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.584057 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.598433 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.611276 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.625297 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.636515 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.648095 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.659753 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.669540 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.684241 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.686553 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.686722 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.686845 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.686917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.686979 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.695631 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.707168 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.729713 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.741902 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.755200 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.773435 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19
b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.789874 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.789906 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.789915 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.789930 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.789941 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.893052 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.893091 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.893100 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.893117 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.893129 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.996758 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.997080 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.997177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.997269 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:35 crc kubenswrapper[4675]: I1125 12:28:35.997355 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:35Z","lastTransitionTime":"2025-11-25T12:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.099991 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.100034 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.100049 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.100067 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.100083 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.203570 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.203914 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.203924 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.203942 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.203953 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.307975 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.308015 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.308026 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.308044 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.308060 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.410364 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.410407 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.410425 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.410442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.410452 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.514081 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.514125 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.514134 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.514153 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.514165 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.617475 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.617537 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.617550 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.617572 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.617586 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.720793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.720884 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.720898 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.720915 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.720930 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.823147 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.823234 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.823246 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.823262 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.823271 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.926377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.926429 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.926442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.926460 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:36 crc kubenswrapper[4675]: I1125 12:28:36.926470 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:36Z","lastTransitionTime":"2025-11-25T12:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.028575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.028634 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.028646 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.028663 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.028676 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:37Z","lastTransitionTime":"2025-11-25T12:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.131518 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.131553 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.131562 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.131577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.131587 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:37Z","lastTransitionTime":"2025-11-25T12:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.235333 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.235428 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.235441 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.235460 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.235471 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:37Z","lastTransitionTime":"2025-11-25T12:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.340571 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.340616 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.340627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.340648 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.340661 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:37Z","lastTransitionTime":"2025-11-25T12:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.531535 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:37 crc kubenswrapper[4675]: E1125 12:28:37.531727 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.531772 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.531772 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:37 crc kubenswrapper[4675]: E1125 12:28:37.531957 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:37 crc kubenswrapper[4675]: I1125 12:28:37.532173 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:37 crc kubenswrapper[4675]: E1125 12:28:37.532247 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:37 crc kubenswrapper[4675]: E1125 12:28:37.532399 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
[... node-status block repeats every ~100 ms from 12:28:37.545 through 12:28:39.500 ...]
Has your network provider started?"} Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.534001 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.534054 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.534094 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.534403 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:39 crc kubenswrapper[4675]: E1125 12:28:39.534556 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:39 crc kubenswrapper[4675]: E1125 12:28:39.534852 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:39 crc kubenswrapper[4675]: E1125 12:28:39.534875 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:39 crc kubenswrapper[4675]: E1125 12:28:39.534923 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.603251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.603295 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.603305 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.603321 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.603331 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:39Z","lastTransitionTime":"2025-11-25T12:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.706078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.706122 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.706132 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.706149 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.706159 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:39Z","lastTransitionTime":"2025-11-25T12:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.809208 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.809525 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.809625 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.809703 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.809769 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:39Z","lastTransitionTime":"2025-11-25T12:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.911835 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.911882 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.911891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.911910 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:39 crc kubenswrapper[4675]: I1125 12:28:39.911920 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:39Z","lastTransitionTime":"2025-11-25T12:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.014432 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.014469 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.014481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.014497 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.014508 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:40Z","lastTransitionTime":"2025-11-25T12:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.116397 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.116681 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.116767 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.116868 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.116937 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:40Z","lastTransitionTime":"2025-11-25T12:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.219606 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.219657 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.219669 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.219691 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.219706 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:40Z","lastTransitionTime":"2025-11-25T12:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.322501 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.322558 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.322571 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.322587 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.322596 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:40Z","lastTransitionTime":"2025-11-25T12:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.425213 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.425265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.425280 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.425299 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:40 crc kubenswrapper[4675]: I1125 12:28:40.425315 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:40Z","lastTransitionTime":"2025-11-25T12:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... this five-entry node-status cycle repeats at ~100 ms intervals, 12:28:40.527 through 12:28:41.246; only the timestamps change ...]
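Each "Node became not ready" entry above is the kubelet's setters.go re-publishing the node's Ready condition with the same payload. Reconstructed as the client-go type it serializes from, the condition looks roughly like the following sketch; the field values are copied from the log entry, and the surrounding program is purely illustrative.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	heartbeat := metav1.NewTime(time.Date(2025, 11, 25, 12, 28, 40, 0, time.UTC))

	// The condition the kubelet keeps re-reporting while no CNI config exists.
	cond := corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastHeartbeatTime:  heartbeat,
		LastTransitionTime: heartbeat,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	fmt.Printf("%s=%s (%s)\n", cond.Type, cond.Status, cond.Reason)
}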
[... the node-status cycle repeats at 12:28:41.349 and 12:28:41.451 ...]
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.531698 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.531946 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.532105 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.532301 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.532365 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.532457 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.532545 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.532336 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.546935 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
[... the node-status cycle repeats at 12:28:41.554 ...]
Nov 25 12:28:41 crc kubenswrapper[4675]: I1125 12:28:41.557198 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.557318 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 12:28:41 crc kubenswrapper[4675]: E1125 12:28:41.557385 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:13.557365862 +0000 UTC m=+98.728958213 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered
[... the node-status cycle repeats at 12:28:41.657 ...]
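The nestedpendingoperations entry above is the kubelet's per-volume retry backoff at work: the failed MountVolume.SetUp may not be retried for 32 s, not until 12:29:13 (m=+98.7 s into this kubelet's run). A minimal sketch of that doubling-with-a-cap pattern follows; the initial delay and the cap are illustrative assumptions, since only the 32 s step is visible in the log.

package main

import (
	"fmt"
	"time"
)

// backoff doubles the delay after every failure, up to a cap.
// Constants below are assumptions; only the 32s step appears in the log.
type backoff struct {
	delay, max time.Duration
}

func (b *backoff) next() time.Duration {
	d := b.delay
	b.delay *= 2
	if b.delay > b.max {
		b.delay = b.max
	}
	return d
}

func main() {
	b := &backoff{delay: time.Second, max: 2 * time.Minute}
	for i := 0; i < 9; i++ {
		fmt.Printf("retry %d blocked for %v\n", i+1, b.next())
	}
	// prints 1s 2s 4s 8s 16s 32s 1m4s 2m0s 2m0s; the sixth failed
	// attempt lands in the 32s window seen in the log entry above.
}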
[... identical node-status cycles repeat at ~100 ms intervals, 12:28:41.759 through 12:28:42.476; only the timestamps change ...]
Nov 25 12:28:42 crc kubenswrapper[4675]: I1125 12:28:42.533154 4675 scope.go:117] "RemoveContainer" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3"
Nov 25 12:28:42 crc kubenswrapper[4675]: E1125 12:28:42.533342 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3"
[... the node-status cycle repeats at 12:28:42.578 and 12:28:42.681 ...]
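The crash-looping ovnkube-controller above, with its restart held back 20 s by crash-loop backoff, is the "network provider" the readiness message keeps asking about: until it stays up and writes a config into /etc/kubernetes/cni/net.d/, the runtime keeps reporting NetworkReady=false and the node stays NotReady. A rough sketch of the kind of directory check behind that message (libcni conventionally looks for .conf, .conflist, and .json files; treat the details as an approximation, not the runtime's exact code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// networkReady reports whether any CNI config file exists in dir,
// approximating the check behind "no CNI configuration file in ...".
func networkReady(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := networkReady("/etc/kubernetes/cni/net.d")
	fmt.Println(ok, err)
}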
[... identical node-status cycles repeat at ~100 ms intervals, 12:28:42.783 through 12:28:43.502; only the timestamps change ...]
Nov 25 12:28:43 crc kubenswrapper[4675]: I1125 12:28:43.532008 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:43 crc kubenswrapper[4675]: I1125 12:28:43.532083 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:43 crc kubenswrapper[4675]: I1125 12:28:43.532015 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:43 crc kubenswrapper[4675]: I1125 12:28:43.532044 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:43 crc kubenswrapper[4675]: E1125 12:28:43.532183 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:43 crc kubenswrapper[4675]: E1125 12:28:43.532294 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:43 crc kubenswrapper[4675]: E1125 12:28:43.532386 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:43 crc kubenswrapper[4675]: E1125 12:28:43.532612 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
[... the node-status cycle repeats at 12:28:43.604 ...]
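All four "Error syncing pod, skipping" entries above are the same gate firing: while the runtime network is not ready, the kubelet declines to sync pods that need pod networking, whereas host-network pods (such as ovnkube-node itself) are exempt and can keep running. A simplified sketch of that decision, under the assumption that the check reduces to the pod's hostNetwork flag:

package main

import (
	"errors"
	"fmt"
)

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

type pod struct {
	name        string
	hostNetwork bool
}

// syncAllowed mirrors the gate that produced the "Error syncing pod,
// skipping" entries: non-host-network pods wait for NetworkReady=true.
func syncAllowed(p pod, networkReady bool) error {
	if !networkReady && !p.hostNetwork {
		return errNetworkNotReady
	}
	return nil
}

func main() {
	pods := []pod{
		{"openshift-multus/network-metrics-daemon-whffq", false},
		{"openshift-ovn-kubernetes/ovnkube-node-gv9qh", true},
	}
	for _, p := range pods {
		fmt.Println(p.name, "->", syncAllowed(p, false))
	}
}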
[... identical node-status cycles repeat at ~100 ms intervals, 12:28:43.707 through 12:28:45.451; only the timestamps change ...]
Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.531746 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.532017 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.532091 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.532175 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.531803 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.532265 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.532532 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.532695 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.551585 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.554027 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.554057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.554069 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.554088 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.554101 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.604520 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.618863 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.632203 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.644346 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.656955 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.656990 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.657001 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.657019 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.657033 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.657918 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.669141 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.680328 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.695779 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.706354 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.708830 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.708865 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.708901 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.708920 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.708933 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.721177 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.723683 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 
2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.726927 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.726955 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.726963 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.726977 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.726987 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.732963 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-synce
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.738989 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.743013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.743294 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.743405 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.743494 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.743586 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.745082 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.756159 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc 
kubenswrapper[4675]: E1125 12:28:45.756200 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 
25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.760502 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.760575 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.760589 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.760612 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.760627 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.771126 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.773472 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 
2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.780000 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.780293 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.780389 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.780498 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.780585 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.784130 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:
58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.795655 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 
2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: E1125 12:28:45.795832 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.799960 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.803038 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.803077 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.803087 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.803102 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.803114 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.815590 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.827879 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.905651 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.905691 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.905704 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.905719 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.905731 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:45Z","lastTransitionTime":"2025-11-25T12:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.907396 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/0.log" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.907442 4675 generic.go:334] "Generic (PLEG): container finished" podID="ede74da4-0d3a-463f-a591-b722f62358c8" containerID="b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0" exitCode=1 Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.907471 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerDied","Data":"b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0"} Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.907877 4675 scope.go:117] "RemoveContainer" containerID="b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.926180 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d
73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.946250 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d54
9fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.957245 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.970950 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.984229 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:45 crc kubenswrapper[4675]: I1125 12:28:45.996170 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:45Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.007893 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.007961 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.007976 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.008019 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.008032 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.009876 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.022790 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.040544 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.054013 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.066665 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\
\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.078427 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.090347 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.099983 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.110636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.110676 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.110688 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.110708 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.110721 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.114398 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.127403 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.139453 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.150330 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.163109 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.212637 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.212672 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.212682 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.212698 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.212711 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.314888 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.314913 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.314921 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.314934 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.314943 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.417177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.417210 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.417219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.417232 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.417241 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.519601 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.519652 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.519665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.519684 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.519696 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.622328 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.623166 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.623239 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.623313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.623378 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.726496 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.726548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.726562 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.726583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.726597 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.829588 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.829932 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.830040 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.830155 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.830252 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.912559 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/0.log" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.912842 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerStarted","Data":"ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.925468 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.931969 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.932008 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.932017 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.932033 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.932058 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:46Z","lastTransitionTime":"2025-11-25T12:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.937638 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.950702 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.962578 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:46 crc kubenswrapper[4675]: I1125 12:28:46.974501 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:46Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.010176 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.034635 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.034665 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.034673 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.034701 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.034710 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.040283 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.060905 4675 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.072140 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.082356 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.092449 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.101242 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.117110 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.136490 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.136529 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.136538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.136554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.136565 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.139569 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.150375 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.164545 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.187202 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.214789 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d54
9fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.229794 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:47Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.239180 4675 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.239218 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.239231 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.239248 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.239261 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.342161 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.342190 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.342200 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.342219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.342230 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.445264 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.445292 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.445301 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.445314 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.445323 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.537369 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:47 crc kubenswrapper[4675]: E1125 12:28:47.537768 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.537945 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.538107 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.538260 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:47 crc kubenswrapper[4675]: E1125 12:28:47.538256 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:47 crc kubenswrapper[4675]: E1125 12:28:47.538326 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:47 crc kubenswrapper[4675]: E1125 12:28:47.538500 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.547406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.547441 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.547454 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.547468 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.547478 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.650421 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.650467 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.650481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.650500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.650513 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.752517 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.752566 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.752577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.752596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.752608 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.855128 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.855171 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.855184 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.855204 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.855218 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.957252 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.957294 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.957307 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.957325 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:47 crc kubenswrapper[4675]: I1125 12:28:47.957340 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:47Z","lastTransitionTime":"2025-11-25T12:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.059282 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.059352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.059362 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.059376 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.059384 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.161768 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.161828 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.161839 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.161852 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.161863 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.264514 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.264554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.264565 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.264581 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.264593 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.367390 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.367431 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.367440 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.367456 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.367466 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.470332 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.470382 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.470396 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.470417 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.470431 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.573300 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.573346 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.573354 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.573369 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.573378 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.675808 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.675860 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.675871 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.675899 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.675911 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.778201 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.778252 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.778266 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.778284 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.778296 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.880489 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.880526 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.880537 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.880555 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.880566 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.982389 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.982427 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.982441 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.982461 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:48 crc kubenswrapper[4675]: I1125 12:28:48.982473 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:48Z","lastTransitionTime":"2025-11-25T12:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.084974 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.085007 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.085016 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.085031 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.085041 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.188930 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.189031 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.189042 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.189057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.189107 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.291182 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.291249 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.291263 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.291289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.291307 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.393641 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.393683 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.393694 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.393710 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.393719 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.496605 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.496663 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.496676 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.496704 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.496716 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.532123 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.532206 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.532123 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:49 crc kubenswrapper[4675]: E1125 12:28:49.532320 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:49 crc kubenswrapper[4675]: E1125 12:28:49.532375 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.532155 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:49 crc kubenswrapper[4675]: E1125 12:28:49.532503 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:49 crc kubenswrapper[4675]: E1125 12:28:49.532542 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.600169 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.600237 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.600254 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.600275 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.600288 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.702343 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.702391 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.702402 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.702422 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.702436 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.805693 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.805742 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.805752 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.805769 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.805781 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.908458 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.908696 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.908779 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.908907 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:49 crc kubenswrapper[4675]: I1125 12:28:49.908992 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:49Z","lastTransitionTime":"2025-11-25T12:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.012386 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.012664 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.012910 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.013011 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.013089 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:50Z","lastTransitionTime":"2025-11-25T12:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.116023 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.116082 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.116094 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.116111 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:50 crc kubenswrapper[4675]: I1125 12:28:50.116120 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:50Z","lastTransitionTime":"2025-11-25T12:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:51 crc kubenswrapper[4675]: I1125 12:28:51.531947 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:51 crc kubenswrapper[4675]: I1125 12:28:51.531990 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:51 crc kubenswrapper[4675]: I1125 12:28:51.532001 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:51 crc kubenswrapper[4675]: I1125 12:28:51.531947 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:51 crc kubenswrapper[4675]: E1125 12:28:51.532104 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:51 crc kubenswrapper[4675]: E1125 12:28:51.532301 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:51 crc kubenswrapper[4675]: E1125 12:28:51.532506 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:28:51 crc kubenswrapper[4675]: E1125 12:28:51.532540 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:53 crc kubenswrapper[4675]: I1125 12:28:53.531975 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:53 crc kubenswrapper[4675]: I1125 12:28:53.532038 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:53 crc kubenswrapper[4675]: I1125 12:28:53.532061 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:53 crc kubenswrapper[4675]: I1125 12:28:53.531995 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:53 crc kubenswrapper[4675]: E1125 12:28:53.532141 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:53 crc kubenswrapper[4675]: E1125 12:28:53.532264 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:53 crc kubenswrapper[4675]: E1125 12:28:53.532368 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:28:53 crc kubenswrapper[4675]: E1125 12:28:53.532432 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.532628 4675 scope.go:117] "RemoveContainer" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.945360 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/2.log"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.947070 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.947108 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.947121 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.947139 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.947159 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:54Z","lastTransitionTime":"2025-11-25T12:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.949276 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"}
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.950445 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.971453 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:54Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:54 crc kubenswrapper[4675]: I1125 12:28:54.996975 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:54Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.016285 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.029332 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.042280 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.049957 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.049990 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.049999 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.050013 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.050024 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.059983 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.081568 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.100020 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.114349 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.126954 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.141041 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.152098 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.152133 
4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.152141 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.152155 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.152166 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.159056 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.170938 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.187112 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.199574 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.213927 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.226135 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 
12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.236397 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.250450 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.254419 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.254460 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.254470 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.254485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.254496 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.356829 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.356867 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.356879 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.356899 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.356910 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.459240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.459289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.459302 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.459323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.459341 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.531900 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.531972 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.531919 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.532059 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.532247 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.532279 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.532560 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.532797 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.548553 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.558931 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.561896 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.561927 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.561935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.561949 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.561958 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.570711 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.579794 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.591004 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.610012 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.621483 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.636718 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.657609 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.664948 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.664983 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.664993 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.665016 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.665028 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.671478 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.691947 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.704074 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.726223 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.740182 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.753295 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.766402 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.768558 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.768612 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.768621 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.768636 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.768663 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.781528 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.799492 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.813734 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.838086 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.838124 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.838135 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.838151 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.838163 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.849155 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.852273 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.852302 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.852313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.852331 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.852342 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.864667 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.867613 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.867644 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.867656 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.867671 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.867683 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.878604 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.882091 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.882133 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.882146 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.882164 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.882176 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.896125 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.899101 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.899133 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.899144 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.899161 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.899171 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.915440 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.915747 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.917943 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.918002 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.918015 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.918032 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.918045 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:55Z","lastTransitionTime":"2025-11-25T12:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.953930 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/3.log" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.954533 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/2.log" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.956795 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" exitCode=1 Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.956861 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.956899 4675 scope.go:117] "RemoveContainer" containerID="7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.957431 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:28:55 crc kubenswrapper[4675]: E1125 12:28:55.957694 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.973021 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.989370 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:55 crc kubenswrapper[4675]: I1125 12:28:55.999437 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:55Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.012541 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.020194 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.020227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.020240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.020257 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.020270 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.032091 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ba26892bbd7517deac21e4d20f5eb6af907af2d73e721bfcaa2cb853dec41b3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:28Z\\\",\\\"message\\\":\\\"777 6298 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.251893 6298 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252769 6298 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:28.252831 6298 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 12:28:28.252868 6298 factory.go:656] Stopping watch factory\\\\nI1125 12:28:28.252887 6298 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:28.253036 6298 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:28.284972 6298 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:28.285015 6298 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:28.285082 6298 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:28.285110 6298 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:28.285214 6298 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"penshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:55.307575 6618 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.307721 6618 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.308220 6618 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:55.308257 6618 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:55.308298 6618 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 12:28:55.308319 6618 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:55.308302 6618 factory.go:656] Stopping watch factory\\\\nI1125 12:28:55.342422 6618 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:55.342449 6618 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:55.342502 6618 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:55.342524 6618 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:55.342600 6618 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.051212 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.061666 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.072631 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.084635 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.096928 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.109637 4675 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.122879 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.122928 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.122940 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.122958 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.122970 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.126516 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.141182 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.154898 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.167513 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.180369 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a31
7b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.191927 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.204485 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.214030 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.226616 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.226680 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.226697 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.226727 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.226745 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.329383 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.329432 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.329445 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.329466 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.329479 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.432052 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.432102 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.432120 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.432138 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.432147 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.535639 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.535692 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.535703 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.535721 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.535733 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.638998 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.639052 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.639067 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.639086 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.639099 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.741218 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.741258 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.741351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.741375 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.741388 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.843289 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.843351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.843361 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.843385 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.843397 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.946756 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.946846 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.946865 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.946888 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.946903 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:56Z","lastTransitionTime":"2025-11-25T12:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.961297 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/3.log" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.964510 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:28:56 crc kubenswrapper[4675]: E1125 12:28:56.964652 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.977243 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:56 crc kubenswrapper[4675]: I1125 12:28:56.990173 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.003375 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.014731 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.026296 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.048124 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb560
11f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.049281 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.049324 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.049339 
4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.049361 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.049375 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.061278 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.077360 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.101248 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"penshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:55.307575 6618 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.307721 6618 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.308220 6618 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:55.308257 6618 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:55.308298 6618 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 12:28:55.308319 6618 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:55.308302 6618 factory.go:656] Stopping watch factory\\\\nI1125 12:28:55.342422 6618 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:55.342449 6618 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:55.342502 6618 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:55.342524 6618 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:55.342600 6618 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.118925 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.129276 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.152461 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.152489 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.152497 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.152509 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.152517 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.168310 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.178942 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.191118 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.205022 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.215269 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.228037 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.238307 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.248292 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:28:57Z is after 2025-08-24T17:21:41Z"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.255336 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.255418 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.255435 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.255796 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.255843 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.358315 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.358352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.358360 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.358376 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.358384 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.463387 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.463444 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.463456 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.463473 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.463485 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.531317 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.531392 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.531319 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:57 crc kubenswrapper[4675]: E1125 12:28:57.531457 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:57 crc kubenswrapper[4675]: E1125 12:28:57.531516 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:28:57 crc kubenswrapper[4675]: E1125 12:28:57.531628 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.531924 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:57 crc kubenswrapper[4675]: E1125 12:28:57.532106 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.565534 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.565569 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.565579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.565594 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.565605 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.667997 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.668041 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.668049 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.668063 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.668072 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.770973 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.771024 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.771038 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.771145 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.771162 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.873825 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.873874 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.873885 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.873904 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.873915 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.977053 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.977093 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.977103 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.977121 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:57 crc kubenswrapper[4675]: I1125 12:28:57.977129 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:57Z","lastTransitionTime":"2025-11-25T12:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.079703 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.079772 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.079788 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.079833 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.079851 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.182371 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.182407 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.182418 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.182434 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.182443 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.286320 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.286365 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.286374 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.286393 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.286403 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.388517 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.388544 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.388552 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.388566 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.388576 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.493322 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.493568 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.493679 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.493795 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.493895 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.597124 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.597200 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.597216 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.597241 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.597256 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.699479 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.699540 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.699552 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.699567 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.699577 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.802285 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.802329 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.802339 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.802352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.802363 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.905239 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.905295 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.905311 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.905330 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:58 crc kubenswrapper[4675]: I1125 12:28:58.905347 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:58Z","lastTransitionTime":"2025-11-25T12:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.008252 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.008305 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.008319 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.008338 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.008352 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.111708 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.111756 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.111768 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.111787 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.111800 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.214240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.214646 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.214837 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.214964 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.215082 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.317935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.317973 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.317984 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.318000 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.318012 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.420754 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.421018 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.421139 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.421263 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.421377 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.441595 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.441707 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.44168498 +0000 UTC m=+148.613277331 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.442045 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.442147 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.442247 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.442442 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442220 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442596 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442610 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442651 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.442639963 +0000 UTC m=+148.614232304 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442280 4675 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442747 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.442735596 +0000 UTC m=+148.614327937 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442341 4675 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442832 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.442799478 +0000 UTC m=+148.614391819 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442930 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.442993 4675 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.443009 4675 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.443097 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.443074058 +0000 UTC m=+148.614666399 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.524365 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.524590 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.524715 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.524929 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.525019 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.531863 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.531926 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.531956 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.532039 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.532349 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.532435 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.532483 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:28:59 crc kubenswrapper[4675]: E1125 12:28:59.532522 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.627598 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.627839 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.627946 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.628032 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.628131 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.729928 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.729957 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.729966 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.729977 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.729986 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.832988 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.833057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.833071 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.833107 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.833124 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.935282 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.935318 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.935409 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.935428 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:28:59 crc kubenswrapper[4675]: I1125 12:28:59.935439 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:28:59Z","lastTransitionTime":"2025-11-25T12:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.037408 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.037475 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.037503 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.037532 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.037553 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.140674 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.140713 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.140721 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.140737 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.140746 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.243491 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.243560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.243583 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.243613 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.243638 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.346194 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.346227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.346257 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.346274 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.346284 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.449697 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.449760 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.449783 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.449813 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.449866 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.552624 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.552702 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.552721 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.552745 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.552763 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.658431 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.658476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.658485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.658499 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.658508 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.760512 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.760556 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.760571 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.760587 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.760597 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.862859 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.862899 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.862907 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.862920 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.862929 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.965497 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.965550 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.965561 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.965579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:00 crc kubenswrapper[4675]: I1125 12:29:00.965591 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:00Z","lastTransitionTime":"2025-11-25T12:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.068256 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.068298 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.068307 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.068323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.068333 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.171405 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.171484 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.171531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.171554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.171578 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.274408 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.274450 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.274461 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.274477 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.274488 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.377057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.377087 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.377094 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.377107 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.377114 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.479948 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.480008 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.480024 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.480040 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.480051 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.532438 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.532460 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.532447 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.532502 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:29:01 crc kubenswrapper[4675]: E1125 12:29:01.532597 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:29:01 crc kubenswrapper[4675]: E1125 12:29:01.532668 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:29:01 crc kubenswrapper[4675]: E1125 12:29:01.532920 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:29:01 crc kubenswrapper[4675]: E1125 12:29:01.533054 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.582948 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.583002 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.583014 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.583032 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.583043 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.685527 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.685568 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.685581 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.685597 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.685608 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.789232 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.789269 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.789279 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.789298 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.789312 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.891183 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.891235 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.891249 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.891265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.891276 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.992926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.992955 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.992965 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.992981 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:01 crc kubenswrapper[4675]: I1125 12:29:01.992993 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:01Z","lastTransitionTime":"2025-11-25T12:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.095014 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.095051 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.095060 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.095073 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.095085 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.197345 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.197381 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.197390 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.197403 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.197412 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.299234 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.299352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.299377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.299404 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.299441 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.402965 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.403214 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.403237 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.403254 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.403263 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.505669 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.505712 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.505728 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.505751 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.505767 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.608243 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.608305 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.608321 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.608340 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.608352 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.711192 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.711230 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.711240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.711255 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.711265 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.814521 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.814567 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.814577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.814594 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.814607 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.916994 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.917265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.917346 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.917437 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:02 crc kubenswrapper[4675]: I1125 12:29:02.917520 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:02Z","lastTransitionTime":"2025-11-25T12:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.020058 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.020105 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.020121 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.020138 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.020149 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.122010 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.122073 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.122090 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.122112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.122128 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.223781 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.223810 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.223838 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.223852 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.223861 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.326075 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.326104 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.326111 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.326123 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.326131 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.428209 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.428251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.428264 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.428285 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.428299 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.530228 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.530262 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.530271 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.530284 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.530294 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.531352 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.531388 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.531389 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.531352 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:03 crc kubenswrapper[4675]: E1125 12:29:03.531463 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:03 crc kubenswrapper[4675]: E1125 12:29:03.531628 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:03 crc kubenswrapper[4675]: E1125 12:29:03.531647 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:03 crc kubenswrapper[4675]: E1125 12:29:03.531689 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.632764 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.632861 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.632872 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.632893 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.632911 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.736047 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.736083 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.736096 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.736112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.736124 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.837949 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.837998 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.838012 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.838036 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.838051 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.940554 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.940588 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.940598 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.940612 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:03 crc kubenswrapper[4675]: I1125 12:29:03.940622 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:03Z","lastTransitionTime":"2025-11-25T12:29:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.043276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.043355 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.043377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.043404 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.043426 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.146982 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.147070 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.147089 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.147117 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.147133 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.250538 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.250591 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.250604 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.250620 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.250632 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.353485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.353808 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.353832 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.353848 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.353858 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.455668 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.455698 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.455706 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.455718 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.455727 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.557530 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.557572 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.557584 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.557599 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.557611 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.659692 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.659751 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.659763 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.659783 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.659797 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.762433 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.762510 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.762532 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.762560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.762580 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.865283 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.865336 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.865345 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.865359 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.865369 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.968112 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.968150 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.968159 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.968174 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:04 crc kubenswrapper[4675]: I1125 12:29:04.968182 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:04Z","lastTransitionTime":"2025-11-25T12:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.070965 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.071006 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.071018 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.071037 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.071053 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.174164 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.174233 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.174254 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.174280 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.174297 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.276884 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.276940 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.276952 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.276972 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.276984 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.380240 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.380310 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.380323 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.380339 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.380350 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.482767 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.482836 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.482846 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.482860 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.482869 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.531635 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.531667 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.531785 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.531927 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:05 crc kubenswrapper[4675]: E1125 12:29:05.531924 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:05 crc kubenswrapper[4675]: E1125 12:29:05.532099 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:05 crc kubenswrapper[4675]: E1125 12:29:05.532269 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:05 crc kubenswrapper[4675]: E1125 12:29:05.532332 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.546158 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2e07bd2-ea2f-48da-9358-49fed47fa922\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf934f4204cb056848633231b02d0d1ae3b6e15da462e0a57cb6c98b2919a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kctvj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n7t8r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.560600 4675 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-cgbpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ede74da4-0d3a-463f-a591-b722f62358c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:45Z\\\",\\\"message\\\":\\\"2025-11-25T12:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891\\\\n2025-11-25T12:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e79925c2-7acc-4004-9e07-61b648684891 to /host/opt/cni/bin/\\\\n2025-11-25T12:28:00Z [verbose] multus-daemon started\\\\n2025-11-25T12:28:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T12:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhml4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-cgbpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.585990 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.586029 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.586040 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.586057 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.586070 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.586703 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5671459-4981-4259-a31d-595dd6f1f4b3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T12:28:55Z\\\",\\\"message\\\":\\\"penshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 12:28:55.307575 6618 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.307721 6618 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 12:28:55.308220 6618 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 12:28:55.308257 6618 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 12:28:55.308298 6618 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 12:28:55.308319 6618 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 12:28:55.308302 6618 factory.go:656] Stopping watch factory\\\\nI1125 12:28:55.342422 6618 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1125 12:28:55.342449 6618 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1125 12:28:55.342502 6618 ovnkube.go:599] Stopped ovnkube\\\\nI1125 12:28:55.342524 6618 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 12:28:55.342600 6618 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrftd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gv9qh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.609860 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc839f2-7007-4918-8acc-6f01ddf15af0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6717440916aa40ca84d87b6f80e3b1f622d7d11d92ca2be78db8ee109bc666\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb6d3f43b24e6b6e86e54d549fe9a8480538df5463ebabb3cb56011f8af41cf2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7165c72bcef0f6824e471e75e39314c4a26f09a3cb72db70b33666f25fcdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cf6fda7bf26ed4e89d81fca8662cae1c833398
ca7321b3476513f39348b2adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f47e91f8eeca376ffa01a645e5735935326e0fe3990945c4f3a444e418d007de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269459e41df7f873ea1491f89be762bdfb8a3577a927592bb6555b16e761dea2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f491a8c3f1c69cb94ab61ba56b93cec5353fc8f3ca2a0a2ccbd22f50f972c7c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4f42ed1b5f21ab175f82c4f507575c8bdbe14b93fe4c169047c8b2f8a0420ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.627039 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"439dd3d5-8975-4c1f-98e1-94226f1c3c93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbdc34805b74d4d25bd4db586a490a3f08156e17e81eb9fe4faf7a03dae99325\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5705f5134657c7c6af6bcdc56c092fb63be9c3b06f391279d164db0a0653209\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://655b960dfb6cacc540a066b0481586531f7298086c2d712aa00ce805ae93c2d3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.645573 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ada09650-e9d6-4e76-a600-7611979b0f31\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45e6a1623171aa935c5e61dc172391fa1e0e4a760443c272e7ba61fcd3d4d243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f868dcc18d779a5416be002b760644fad9843c6aa9734aa7f87c19318eeb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c6cd54e786c63121a6deeeea4edc4b73507a17161929ecb2d21d51f655f447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4647c5004779893aaa8422bbf431985224641ab7ecc196a3cb1cb296af37857d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.662686 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33589e918292fd2dabb9c73643d31917b95beb81e44bed329f6ef1befa965905\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb891bb9e715eaae0c1c15cf69e93a07915172b49c6d1bac27eaf143a39fddca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.681917 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e171dcdfc7317ff290aee43f77a59a2e86ba2eb2f711bcb968ec16ab27d84007\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.688752 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.688798 
4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.688840 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.688859 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.688875 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.695931 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dbcad6d13ba307907d26dd3b0e24b65cd49d0e1b7f0113f1f41113d149803b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.712610 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.733640 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-56fmx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31ed2ad2-a571-44ac-9f18-afd71427fd7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e861795306b1da375128213520292037a5daf08355992188c5bcc56df91c874e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e40f1ef8c503750506cd92ba987a80c53c8cc85d5fc78a87f90044757f919db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5149b51d28aa13e326869823bf92142d56bfde5a503c87e97a52f361dad7811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14c7fa9042a2cb5e2598cf0992335077542804f847d088fc1c8a6e2f90910aa0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62ed5e3c72046efff0fb225a93f57ac816f8c48554e9a3e786d1f0d7b0c936f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4bfb26b2a10813caf5feeb0da161170925c6ac32c8fdd5bc25cde1569b13d98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18871d147691a6e08c1ab7f3e18c6a4594c6f7cbde27fa69332029f18ae55673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:28:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ldwpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-56fmx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.748292 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"edf8a42b-383a-4d62-bfe0-ce2a4be626a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e402116cd4a759f9d705bba947332f306a878dd42af62db73549940f12fb4086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://02c75ed65404a0cc95cafec02e9e0298e6ee386cbe069e321245235b8b5af0b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.760744 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-whffq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"134942f4-79a7-4b14-9f21-ae027d146b44\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2r78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-whffq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.773330 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e52af8a1-50bf-41c1-8661-136814faf6c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:28:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf925783f9a74cfd8fca1d233b46fe15a4c99c493ec4ba85d329bf3f74a1dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cadd286080a317b1783e86490e9a75a1a1efb4deb2f4a78b38ec7a370ba12619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:28:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5fz4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:28:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cw6wl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 
12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.785299 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bcd9v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59b10c95-6525-4e1b-a48d-7e1fb681b8e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21dc5dc06f605f63a0ec7d7aa46f15825267ecc6856ff8b832e88e5eaed2d6e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g7wkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:58Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bcd9v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.791236 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.791267 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.791276 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.791293 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.791304 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.798806 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.809169 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkhpr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f78a3216-c1be-4cef-bf38-12b24f061f07\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99c0809327895cf23aae401349addc3ee86be97368a5ad380029d8b380b68af9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b67hl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkhpr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.830959 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.844036 4675 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc2869e-ed3f-48cf-9f10-a80e24888121\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T12:27:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T12:27:39Z\\\",\\\"message\\\":\\\"W1125 12:27:38.702749 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 12:27:38.703100 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764073658 cert, and key in /tmp/serving-cert-1099954182/serving-signer.crt, /tmp/serving-cert-1099954182/serving-signer.key\\\\nI1125 12:27:38.920705 1 observer_polling.go:159] Starting file observer\\\\nW1125 12:27:38.925393 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 12:27:38.925512 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 12:27:38.928252 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1099954182/tls.crt::/tmp/serving-cert-1099954182/tls.key\\\\\\\"\\\\nF1125 12:27:39.086695 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T12:27:38Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T12:27:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T12:27:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T12:27:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:05Z is after 2025-08-24T17:21:41Z" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.894170 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.894211 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.894235 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.894250 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.894259 4675 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.997248 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.997300 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.997313 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.997332 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:05 crc kubenswrapper[4675]: I1125 12:29:05.997344 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:05Z","lastTransitionTime":"2025-11-25T12:29:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.100737 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.101100 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.101231 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.101350 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.101462 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.204871 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.204909 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.204917 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.204932 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.204946 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.302531 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.302567 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.302578 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.302596 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.302607 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.319493 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:06Z is after 
2025-08-24T17:21:41Z" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.323342 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.323392 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.323406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.323426 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.323438 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.338298 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148060Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608860Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T12:29:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7da5d77d-65e7-4977-b3b0-0de1398892b4\\\",\\\"systemUUID\\\":\\\"04be46d1-9e64-47f5-b017-0d678111e234\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:06Z is after 
2025-08-24T17:21:41Z" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.343393 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.343470 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.343496 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.343965 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.344417 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.358488 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [status patch payload elided; identical to the first attempt above] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:06Z is after
2025-08-24T17:21:41Z" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.367134 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.367199 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.367211 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.367227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.367241 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.381466 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [status patch payload elided; identical to the first attempt above] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:06Z is after
2025-08-24T17:21:41Z" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.385121 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.385152 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.385164 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.385179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.385189 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.395931 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ [status patch payload elided; identical to the first attempt above] }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T12:29:06Z is after
2025-08-24T17:21:41Z" Nov 25 12:29:06 crc kubenswrapper[4675]: E1125 12:29:06.396066 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.397578 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.397615 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.397630 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.397648 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.397661 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.500428 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.500479 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.500494 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.500516 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.500533 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.603353 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.603712 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.603976 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.604207 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:06 crc kubenswrapper[4675]: I1125 12:29:06.604403 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:06Z","lastTransitionTime":"2025-11-25T12:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... identical NodeHasSufficientMemory/NodeHasNoDiskPressure/NodeHasSufficientPID/NodeNotReady events and "Node became not ready" status records repeat at ~100 ms intervals from 12:29:06 through 12:29:11; only distinct records are kept below ...]
Nov 25 12:29:07 crc kubenswrapper[4675]: I1125 12:29:07.531724 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:29:07 crc kubenswrapper[4675]: I1125 12:29:07.531863 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:29:07 crc kubenswrapper[4675]: E1125 12:29:07.531965 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:29:07 crc kubenswrapper[4675]: I1125 12:29:07.532044 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:29:07 crc kubenswrapper[4675]: E1125 12:29:07.532129 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:29:07 crc kubenswrapper[4675]: E1125 12:29:07.532268 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:29:07 crc kubenswrapper[4675]: I1125 12:29:07.532492 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:29:07 crc kubenswrapper[4675]: E1125 12:29:07.532669 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:29:08 crc kubenswrapper[4675]: I1125 12:29:08.532547 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"
Nov 25 12:29:08 crc kubenswrapper[4675]: E1125 12:29:08.532691 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3"
Nov 25 12:29:09 crc kubenswrapper[4675]: I1125 12:29:09.532143 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:29:09 crc kubenswrapper[4675]: I1125 12:29:09.532342 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:29:09 crc kubenswrapper[4675]: I1125 12:29:09.532369 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:29:09 crc kubenswrapper[4675]: I1125 12:29:09.532562 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:29:09 crc kubenswrapper[4675]: E1125 12:29:09.532561 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:29:09 crc kubenswrapper[4675]: E1125 12:29:09.532679 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:29:09 crc kubenswrapper[4675]: E1125 12:29:09.533141 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:29:09 crc kubenswrapper[4675]: E1125 12:29:09.533267 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.532291 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.532413 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.532938 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.532981 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 12:29:11 crc kubenswrapper[4675]: E1125 12:29:11.533124 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 12:29:11 crc kubenswrapper[4675]: E1125 12:29:11.533224 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44"
Nov 25 12:29:11 crc kubenswrapper[4675]: E1125 12:29:11.533359 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 12:29:11 crc kubenswrapper[4675]: E1125 12:29:11.533417 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.842675 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.842737 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.842760 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.842789 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.842861 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:11Z","lastTransitionTime":"2025-11-25T12:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.945306 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.945363 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.945382 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.945406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:11 crc kubenswrapper[4675]: I1125 12:29:11.945423 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:11Z","lastTransitionTime":"2025-11-25T12:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.049115 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.049416 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.049614 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.049754 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.049877 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.152614 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.152676 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.152700 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.152728 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.152748 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.254884 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.255217 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.255336 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.255438 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.255531 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.358189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.358219 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.358229 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.358245 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.358256 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.460794 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.460842 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.460852 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.460865 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.460898 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.563436 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.563493 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.563504 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.563520 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.563541 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.665509 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.665536 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.665548 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.665562 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.665570 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.767847 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.768131 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.768236 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.768405 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.768489 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.871336 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.871374 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.871387 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.871404 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.871416 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.974059 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.974098 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.974109 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.974125 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:12 crc kubenswrapper[4675]: I1125 12:29:12.974135 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:12Z","lastTransitionTime":"2025-11-25T12:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.076106 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.076377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.076573 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.076696 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.076784 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.179285 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.179356 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.179369 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.179384 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.179395 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.282432 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.282478 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.282490 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.282506 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.282517 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.384579 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.385293 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.385454 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.385701 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.385798 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.487928 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.487981 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.487994 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.488011 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.488023 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.532104 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.532254 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.532594 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.532760 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.532785 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.533268 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.533407 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.533506 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.581158 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.581280 4675 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:29:13 crc kubenswrapper[4675]: E1125 12:29:13.581325 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs podName:134942f4-79a7-4b14-9f21-ae027d146b44 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:17.581312271 +0000 UTC m=+162.752904612 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs") pod "network-metrics-daemon-whffq" (UID: "134942f4-79a7-4b14-9f21-ae027d146b44") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.590474 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.590521 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.590536 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.590560 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.590575 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.693118 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.693161 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.693172 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.693189 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.693201 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.795470 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.795526 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.795547 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.795573 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.795590 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.898028 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.898073 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.898086 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.898103 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:13 crc kubenswrapper[4675]: I1125 12:29:13.898115 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:13Z","lastTransitionTime":"2025-11-25T12:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.001119 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.001156 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.001165 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.001179 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.001188 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.103571 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.103631 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.103646 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.103667 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.103682 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.206238 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.206322 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.206346 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.206376 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.206398 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.308481 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.308527 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.308535 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.308547 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.308555 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.411318 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.411374 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.411387 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.411406 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.411419 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.515419 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.515470 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.515480 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.515503 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.515517 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.617566 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.617610 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.617625 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.617647 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.617661 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.720293 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.720340 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.720351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.720366 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.720375 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.822524 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.822557 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.822565 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.822577 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.822585 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.925570 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.925645 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.925666 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.925690 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:14 crc kubenswrapper[4675]: I1125 12:29:14.925708 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:14Z","lastTransitionTime":"2025-11-25T12:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.027926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.027964 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.027975 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.027988 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.027998 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.132078 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.132157 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.132177 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.132559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.132777 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.236185 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.236251 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.236265 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.236283 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.236294 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.339227 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.339302 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.339352 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.339377 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.339391 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.442243 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.442287 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.442297 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.442312 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.442325 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.532025 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.532143 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:15 crc kubenswrapper[4675]: E1125 12:29:15.532246 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:15 crc kubenswrapper[4675]: E1125 12:29:15.532377 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.532156 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.532442 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:15 crc kubenswrapper[4675]: E1125 12:29:15.532509 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:15 crc kubenswrapper[4675]: E1125 12:29:15.532559 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.544280 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.544692 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.544935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.545116 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.545395 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.567330 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-bcd9v" podStartSLOduration=80.567303145 podStartE2EDuration="1m20.567303145s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.551609323 +0000 UTC m=+100.723201664" watchObservedRunningTime="2025-11-25 12:29:15.567303145 +0000 UTC m=+100.738895496" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.579884 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-xkhpr" podStartSLOduration=81.579858956 podStartE2EDuration="1m21.579858956s" podCreationTimestamp="2025-11-25 12:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.579580156 +0000 UTC m=+100.751172507" watchObservedRunningTime="2025-11-25 12:29:15.579858956 +0000 UTC m=+100.751451337" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.616489 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=81.616471213 podStartE2EDuration="1m21.616471213s" podCreationTimestamp="2025-11-25 12:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.616317008 +0000 UTC m=+100.787909399" watchObservedRunningTime="2025-11-25 12:29:15.616471213 +0000 UTC m=+100.788063564" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.648393 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.648441 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.648456 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 
crc kubenswrapper[4675]: I1125 12:29:15.648473 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.648487 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.651900 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podStartSLOduration=80.651875718 podStartE2EDuration="1m20.651875718s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.632180975 +0000 UTC m=+100.803773316" watchObservedRunningTime="2025-11-25 12:29:15.651875718 +0000 UTC m=+100.823468079" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.679899 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-cgbpj" podStartSLOduration=80.679881903 podStartE2EDuration="1m20.679881903s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.651542886 +0000 UTC m=+100.823135247" watchObservedRunningTime="2025-11-25 12:29:15.679881903 +0000 UTC m=+100.851474254" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.719527 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=74.719508066 podStartE2EDuration="1m14.719508066s" podCreationTimestamp="2025-11-25 12:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.719491115 +0000 UTC m=+100.891083466" watchObservedRunningTime="2025-11-25 12:29:15.719508066 +0000 UTC m=+100.891100407" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.720206 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=78.7201987 podStartE2EDuration="1m18.7201987s" podCreationTimestamp="2025-11-25 12:27:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.706483788 +0000 UTC m=+100.878076129" watchObservedRunningTime="2025-11-25 12:29:15.7201987 +0000 UTC m=+100.891791061" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.749181 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=48.749162439 podStartE2EDuration="48.749162439s" podCreationTimestamp="2025-11-25 12:28:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.735346763 +0000 UTC m=+100.906939104" watchObservedRunningTime="2025-11-25 12:29:15.749162439 +0000 UTC m=+100.920754780" Nov 25 
12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.750379 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.750415 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.750427 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.750442 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.750452 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.827728 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-56fmx" podStartSLOduration=80.827704801 podStartE2EDuration="1m20.827704801s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.817842653 +0000 UTC m=+100.989435014" watchObservedRunningTime="2025-11-25 12:29:15.827704801 +0000 UTC m=+100.999297162" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.828674 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=34.828666374 podStartE2EDuration="34.828666374s" podCreationTimestamp="2025-11-25 12:28:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.827912277 +0000 UTC m=+100.999504618" watchObservedRunningTime="2025-11-25 12:29:15.828666374 +0000 UTC m=+101.000258735" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.847046 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cw6wl" podStartSLOduration=79.84702762 podStartE2EDuration="1m19.84702762s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:15.846426028 +0000 UTC m=+101.018018389" watchObservedRunningTime="2025-11-25 12:29:15.84702762 +0000 UTC m=+101.018619981" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.852958 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.853010 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.853020 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.853037 4675 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.853064 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.955051 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.955088 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.955098 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.955114 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:15 crc kubenswrapper[4675]: I1125 12:29:15.955124 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:15Z","lastTransitionTime":"2025-11-25T12:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.057848 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.057936 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.057968 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.057996 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.058019 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.159891 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.159926 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.159935 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.159949 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.159957 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.262438 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.262476 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.262485 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.262500 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.262510 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.364934 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.365169 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.365242 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.365319 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.365411 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.468072 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.468559 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.468650 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.468725 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.468837 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.571638 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.571705 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.571783 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.571845 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.571869 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.674733 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.675033 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.675137 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.675257 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.675358 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.777351 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.777627 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.777721 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.777793 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.777931 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.784595 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.784801 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.784885 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.784957 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.785015 4675 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T12:29:16Z","lastTransitionTime":"2025-11-25T12:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.823064 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27"] Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.823400 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:16 crc kubenswrapper[4675]: W1125 12:29:16.825476 4675 reflector.go:561] object-"openshift-cluster-version"/"cluster-version-operator-serving-cert": failed to list *v1.Secret: secrets "cluster-version-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-version": no relationship found between node 'crc' and this object Nov 25 12:29:16 crc kubenswrapper[4675]: E1125 12:29:16.825510 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-version\"/\"cluster-version-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cluster-version-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-version\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:16 crc kubenswrapper[4675]: W1125 12:29:16.825625 4675 reflector.go:561] object-"openshift-cluster-version"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-version": no relationship found between node 'crc' and this object Nov 25 12:29:16 crc kubenswrapper[4675]: E1125 12:29:16.825719 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-version\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-version\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:16 crc kubenswrapper[4675]: W1125 12:29:16.825681 4675 reflector.go:561] object-"openshift-cluster-version"/"default-dockercfg-gxtc4": failed to list *v1.Secret: secrets "default-dockercfg-gxtc4" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-version": no relationship found between node 'crc' and this object Nov 25 12:29:16 crc kubenswrapper[4675]: E1125 12:29:16.825867 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-version\"/\"default-dockercfg-gxtc4\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-gxtc4\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-version\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:16 crc kubenswrapper[4675]: W1125 12:29:16.826446 4675 reflector.go:561] object-"openshift-cluster-version"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-version": no relationship found between node 'crc' and this object Nov 25 12:29:16 crc kubenswrapper[4675]: E1125 12:29:16.826477 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-version\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" 
in API group \"\" in the namespace \"openshift-cluster-version\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.916847 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.916894 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.916924 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.916947 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:16 crc kubenswrapper[4675]: I1125 12:29:16.916979 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018338 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018381 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018409 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-cvo-updatepayloads\") pod 
\"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018429 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018459 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.018543 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.019051 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/02abb557-a00c-419e-bc4f-69ef5fff8564-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.532336 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:17 crc kubenswrapper[4675]: E1125 12:29:17.532552 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.532670 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:17 crc kubenswrapper[4675]: E1125 12:29:17.532863 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.532958 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:17 crc kubenswrapper[4675]: E1125 12:29:17.533096 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:17 crc kubenswrapper[4675]: I1125 12:29:17.533241 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:17 crc kubenswrapper[4675]: E1125 12:29:17.533439 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.019800 4675 configmap.go:193] Couldn't get configMap openshift-cluster-version/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.019869 4675 secret.go:188] Couldn't get secret openshift-cluster-version/cluster-version-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.019877 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca podName:02abb557-a00c-419e-bc4f-69ef5fff8564 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:18.519861322 +0000 UTC m=+103.691453653 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca") pod "cluster-version-operator-5c965bbfc6-4mt27" (UID: "02abb557-a00c-419e-bc4f-69ef5fff8564") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.019966 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert podName:02abb557-a00c-419e-bc4f-69ef5fff8564 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:18.519943704 +0000 UTC m=+103.691536075 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert") pod "cluster-version-operator-5c965bbfc6-4mt27" (UID: "02abb557-a00c-419e-bc4f-69ef5fff8564") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.031544 4675 projected.go:288] Couldn't get configMap openshift-cluster-version/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.031590 4675 projected.go:194] Error preparing data for projected volume kube-api-access for pod openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: E1125 12:29:18.031662 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access podName:02abb557-a00c-419e-bc4f-69ef5fff8564 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:18.531639166 +0000 UTC m=+103.703231537 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access") pod "cluster-version-operator-5c965bbfc6-4mt27" (UID: "02abb557-a00c-419e-bc4f-69ef5fff8564") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.078850 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.084581 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.146588 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.380213 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.537055 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.537149 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.537174 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 
12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.538472 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/02abb557-a00c-419e-bc4f-69ef5fff8564-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.543245 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02abb557-a00c-419e-bc4f-69ef5fff8564-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.544355 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02abb557-a00c-419e-bc4f-69ef5fff8564-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4mt27\" (UID: \"02abb557-a00c-419e-bc4f-69ef5fff8564\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:18 crc kubenswrapper[4675]: I1125 12:29:18.637511 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.035094 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" event={"ID":"02abb557-a00c-419e-bc4f-69ef5fff8564","Type":"ContainerStarted","Data":"875361afb884f59ceb867bdcac50564f6046aabcb14307bb2eb559bc34b44bcf"} Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.035144 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" event={"ID":"02abb557-a00c-419e-bc4f-69ef5fff8564","Type":"ContainerStarted","Data":"03232d2d5d5a5a8c699c2f9521456b5519334e2eadbe2b5a83cade95ba423a47"} Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.048117 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4mt27" podStartSLOduration=84.048100183 podStartE2EDuration="1m24.048100183s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:19.046463266 +0000 UTC m=+104.218055607" watchObservedRunningTime="2025-11-25 12:29:19.048100183 +0000 UTC m=+104.219692524" Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.532320 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:19 crc kubenswrapper[4675]: E1125 12:29:19.532722 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.532393 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:19 crc kubenswrapper[4675]: E1125 12:29:19.532859 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.532509 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:19 crc kubenswrapper[4675]: E1125 12:29:19.532943 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:19 crc kubenswrapper[4675]: I1125 12:29:19.532357 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:19 crc kubenswrapper[4675]: E1125 12:29:19.533008 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:21 crc kubenswrapper[4675]: I1125 12:29:21.532037 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:21 crc kubenswrapper[4675]: I1125 12:29:21.532093 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:21 crc kubenswrapper[4675]: I1125 12:29:21.532142 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:21 crc kubenswrapper[4675]: I1125 12:29:21.532046 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:21 crc kubenswrapper[4675]: E1125 12:29:21.532221 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:21 crc kubenswrapper[4675]: E1125 12:29:21.532350 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:21 crc kubenswrapper[4675]: E1125 12:29:21.532568 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:21 crc kubenswrapper[4675]: E1125 12:29:21.532651 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:23 crc kubenswrapper[4675]: I1125 12:29:23.531849 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:23 crc kubenswrapper[4675]: I1125 12:29:23.531916 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:23 crc kubenswrapper[4675]: E1125 12:29:23.531954 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:23 crc kubenswrapper[4675]: E1125 12:29:23.532046 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:23 crc kubenswrapper[4675]: I1125 12:29:23.532124 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:23 crc kubenswrapper[4675]: E1125 12:29:23.532178 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:23 crc kubenswrapper[4675]: I1125 12:29:23.532682 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:23 crc kubenswrapper[4675]: I1125 12:29:23.532717 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:29:23 crc kubenswrapper[4675]: E1125 12:29:23.532941 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:23 crc kubenswrapper[4675]: E1125 12:29:23.532974 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gv9qh_openshift-ovn-kubernetes(e5671459-4981-4259-a31d-595dd6f1f4b3)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" Nov 25 12:29:25 crc kubenswrapper[4675]: I1125 12:29:25.531991 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:25 crc kubenswrapper[4675]: I1125 12:29:25.532125 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:25 crc kubenswrapper[4675]: I1125 12:29:25.532141 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:25 crc kubenswrapper[4675]: I1125 12:29:25.532171 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:25 crc kubenswrapper[4675]: E1125 12:29:25.534256 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:25 crc kubenswrapper[4675]: E1125 12:29:25.534309 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:25 crc kubenswrapper[4675]: E1125 12:29:25.534421 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:25 crc kubenswrapper[4675]: E1125 12:29:25.537900 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:27 crc kubenswrapper[4675]: I1125 12:29:27.531448 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:27 crc kubenswrapper[4675]: I1125 12:29:27.531545 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:27 crc kubenswrapper[4675]: E1125 12:29:27.531601 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:27 crc kubenswrapper[4675]: E1125 12:29:27.531736 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:27 crc kubenswrapper[4675]: I1125 12:29:27.531856 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:27 crc kubenswrapper[4675]: E1125 12:29:27.532029 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:27 crc kubenswrapper[4675]: I1125 12:29:27.532358 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:27 crc kubenswrapper[4675]: E1125 12:29:27.532515 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:29 crc kubenswrapper[4675]: I1125 12:29:29.532073 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:29 crc kubenswrapper[4675]: E1125 12:29:29.532211 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:29 crc kubenswrapper[4675]: I1125 12:29:29.533062 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:29 crc kubenswrapper[4675]: I1125 12:29:29.533079 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:29 crc kubenswrapper[4675]: E1125 12:29:29.533333 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:29 crc kubenswrapper[4675]: I1125 12:29:29.533110 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:29 crc kubenswrapper[4675]: E1125 12:29:29.533410 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:29 crc kubenswrapper[4675]: E1125 12:29:29.533336 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:31 crc kubenswrapper[4675]: I1125 12:29:31.532295 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:31 crc kubenswrapper[4675]: I1125 12:29:31.532333 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:31 crc kubenswrapper[4675]: E1125 12:29:31.532399 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:31 crc kubenswrapper[4675]: I1125 12:29:31.532296 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:31 crc kubenswrapper[4675]: E1125 12:29:31.532485 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:31 crc kubenswrapper[4675]: I1125 12:29:31.532351 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:31 crc kubenswrapper[4675]: E1125 12:29:31.532526 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:31 crc kubenswrapper[4675]: E1125 12:29:31.532588 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.076532 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/1.log" Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.077472 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/0.log" Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.077525 4675 generic.go:334] "Generic (PLEG): container finished" podID="ede74da4-0d3a-463f-a591-b722f62358c8" containerID="ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533" exitCode=1 Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.077564 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerDied","Data":"ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533"} Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.077604 4675 scope.go:117] "RemoveContainer" containerID="b197e8503b7ccbddf775e5c1766024955760e73bd69e4f73efd29d0499f3efa0" Nov 25 12:29:32 crc kubenswrapper[4675]: I1125 12:29:32.078597 4675 scope.go:117] "RemoveContainer" containerID="ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533" Nov 25 12:29:32 crc kubenswrapper[4675]: E1125 12:29:32.081350 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-cgbpj_openshift-multus(ede74da4-0d3a-463f-a591-b722f62358c8)\"" pod="openshift-multus/multus-cgbpj" podUID="ede74da4-0d3a-463f-a591-b722f62358c8" Nov 25 12:29:33 crc kubenswrapper[4675]: I1125 12:29:33.083140 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/1.log" Nov 25 12:29:33 crc kubenswrapper[4675]: I1125 12:29:33.541500 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:33 crc kubenswrapper[4675]: E1125 12:29:33.541624 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:33 crc kubenswrapper[4675]: I1125 12:29:33.541632 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:33 crc kubenswrapper[4675]: E1125 12:29:33.541698 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:33 crc kubenswrapper[4675]: I1125 12:29:33.542643 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:33 crc kubenswrapper[4675]: E1125 12:29:33.542755 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:33 crc kubenswrapper[4675]: I1125 12:29:33.543051 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:33 crc kubenswrapper[4675]: E1125 12:29:33.543255 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.515791 4675 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 12:29:35 crc kubenswrapper[4675]: I1125 12:29:35.532297 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:35 crc kubenswrapper[4675]: I1125 12:29:35.532352 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:35 crc kubenswrapper[4675]: I1125 12:29:35.532370 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:35 crc kubenswrapper[4675]: I1125 12:29:35.532385 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.533673 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.533747 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.533801 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.533862 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:35 crc kubenswrapper[4675]: E1125 12:29:35.666756 4675 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 12:29:37 crc kubenswrapper[4675]: I1125 12:29:37.531925 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:37 crc kubenswrapper[4675]: I1125 12:29:37.531957 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:37 crc kubenswrapper[4675]: I1125 12:29:37.531957 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:37 crc kubenswrapper[4675]: E1125 12:29:37.533357 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:37 crc kubenswrapper[4675]: E1125 12:29:37.532779 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:37 crc kubenswrapper[4675]: E1125 12:29:37.533210 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:37 crc kubenswrapper[4675]: I1125 12:29:37.532006 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:37 crc kubenswrapper[4675]: E1125 12:29:37.533798 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:38 crc kubenswrapper[4675]: I1125 12:29:38.532165 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.102695 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/3.log" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.106533 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerStarted","Data":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.107126 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.148727 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podStartSLOduration=104.14871016 podStartE2EDuration="1m44.14871016s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:29:39.148494173 +0000 UTC m=+124.320086514" watchObservedRunningTime="2025-11-25 12:29:39.14871016 +0000 UTC m=+124.320302501" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.443377 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-whffq"] Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.443505 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:39 crc kubenswrapper[4675]: E1125 12:29:39.443599 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.532217 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:39 crc kubenswrapper[4675]: E1125 12:29:39.532320 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.532493 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:39 crc kubenswrapper[4675]: E1125 12:29:39.532538 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:39 crc kubenswrapper[4675]: I1125 12:29:39.532708 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:39 crc kubenswrapper[4675]: E1125 12:29:39.532752 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:40 crc kubenswrapper[4675]: I1125 12:29:40.531931 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:40 crc kubenswrapper[4675]: E1125 12:29:40.532097 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:40 crc kubenswrapper[4675]: E1125 12:29:40.669370 4675 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 12:29:41 crc kubenswrapper[4675]: I1125 12:29:41.532024 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:41 crc kubenswrapper[4675]: E1125 12:29:41.532388 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:41 crc kubenswrapper[4675]: I1125 12:29:41.532125 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:41 crc kubenswrapper[4675]: E1125 12:29:41.532475 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:41 crc kubenswrapper[4675]: I1125 12:29:41.532024 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:41 crc kubenswrapper[4675]: E1125 12:29:41.532538 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:42 crc kubenswrapper[4675]: I1125 12:29:42.531931 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:42 crc kubenswrapper[4675]: E1125 12:29:42.532093 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:43 crc kubenswrapper[4675]: I1125 12:29:43.531459 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:43 crc kubenswrapper[4675]: E1125 12:29:43.531712 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:43 crc kubenswrapper[4675]: I1125 12:29:43.532104 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:43 crc kubenswrapper[4675]: E1125 12:29:43.532242 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:43 crc kubenswrapper[4675]: I1125 12:29:43.532620 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:43 crc kubenswrapper[4675]: E1125 12:29:43.532756 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:44 crc kubenswrapper[4675]: I1125 12:29:44.532214 4675 scope.go:117] "RemoveContainer" containerID="ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533" Nov 25 12:29:44 crc kubenswrapper[4675]: I1125 12:29:44.532445 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:44 crc kubenswrapper[4675]: E1125 12:29:44.533561 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:45 crc kubenswrapper[4675]: I1125 12:29:45.128667 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/1.log" Nov 25 12:29:45 crc kubenswrapper[4675]: I1125 12:29:45.128720 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerStarted","Data":"ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd"} Nov 25 12:29:45 crc kubenswrapper[4675]: I1125 12:29:45.532071 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:45 crc kubenswrapper[4675]: I1125 12:29:45.532074 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:45 crc kubenswrapper[4675]: I1125 12:29:45.532140 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:45 crc kubenswrapper[4675]: E1125 12:29:45.534037 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:45 crc kubenswrapper[4675]: E1125 12:29:45.534088 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:45 crc kubenswrapper[4675]: E1125 12:29:45.534139 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:45 crc kubenswrapper[4675]: E1125 12:29:45.670924 4675 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 12:29:46 crc kubenswrapper[4675]: I1125 12:29:46.531766 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:46 crc kubenswrapper[4675]: E1125 12:29:46.531931 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:47 crc kubenswrapper[4675]: I1125 12:29:47.532348 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:47 crc kubenswrapper[4675]: E1125 12:29:47.534108 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:47 crc kubenswrapper[4675]: I1125 12:29:47.532431 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:47 crc kubenswrapper[4675]: E1125 12:29:47.534256 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:47 crc kubenswrapper[4675]: I1125 12:29:47.532425 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:47 crc kubenswrapper[4675]: E1125 12:29:47.534359 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:48 crc kubenswrapper[4675]: I1125 12:29:48.531573 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:48 crc kubenswrapper[4675]: E1125 12:29:48.531747 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:49 crc kubenswrapper[4675]: I1125 12:29:49.531443 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:49 crc kubenswrapper[4675]: I1125 12:29:49.531484 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:49 crc kubenswrapper[4675]: E1125 12:29:49.531657 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 12:29:49 crc kubenswrapper[4675]: I1125 12:29:49.532036 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:49 crc kubenswrapper[4675]: E1125 12:29:49.532167 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 12:29:49 crc kubenswrapper[4675]: E1125 12:29:49.532475 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 12:29:50 crc kubenswrapper[4675]: I1125 12:29:50.532200 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:50 crc kubenswrapper[4675]: E1125 12:29:50.533084 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-whffq" podUID="134942f4-79a7-4b14-9f21-ae027d146b44" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.532289 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.532289 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.532908 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.534845 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.535187 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.536463 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 12:29:51 crc kubenswrapper[4675]: I1125 12:29:51.537630 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 12:29:52 crc kubenswrapper[4675]: I1125 12:29:52.532040 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:29:52 crc kubenswrapper[4675]: I1125 12:29:52.534182 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 12:29:52 crc kubenswrapper[4675]: I1125 12:29:52.535956 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.877743 4675 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.931766 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.932455 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.932640 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.933162 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.934551 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.934874 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.936659 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.937259 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.938008 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wh2qp"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.938523 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.940117 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fmvk5"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.940469 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.941292 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.941674 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.942956 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.943360 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.959251 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-j2gbr"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.959920 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.967641 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp"] Nov 25 12:29:57 crc kubenswrapper[4675]: I1125 12:29:57.968276 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.996499 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"etcd-serving-ca": failed to list *v1.ConfigMap: configmaps "etcd-serving-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.996534 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.996590 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.996545 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"etcd-serving-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.996655 4675 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv": failed to list *v1.Secret: secrets "openshift-apiserver-operator-dockercfg-xtcjv" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.996673 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-xtcjv\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-dockercfg-xtcjv\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.996944 4675 reflector.go:561] object-"openshift-authentication-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.996967 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group 
\"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.997097 4675 reflector.go:561] object-"openshift-apiserver"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.997116 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.997099 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-serving-cert": failed to list *v1.Secret: secrets "v4-0-config-system-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.997138 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-system-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:57 crc kubenswrapper[4675]: W1125 12:29:57.997126 4675 reflector.go:561] object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc": failed to list *v1.Secret: secrets "oauth-openshift-dockercfg-znhcc" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:57 crc kubenswrapper[4675]: E1125 12:29:57.997166 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-znhcc\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"oauth-openshift-dockercfg-znhcc\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001317 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001341 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-node-pullsecrets\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " 
pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001357 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6xdx\" (UniqueName: \"kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001377 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001397 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001422 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001436 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001454 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001481 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc 
kubenswrapper[4675]: I1125 12:29:58.001495 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xftds\" (UniqueName: \"kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001511 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-audit-dir\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001531 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-dir\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001546 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001560 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001577 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001591 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001605 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001621 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001636 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001651 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdd6k\" (UniqueName: \"kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001666 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001680 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001697 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001712 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001726 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001739 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001753 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001768 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001782 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-config\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001799 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001832 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001848 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjqdf\" (UniqueName: \"kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001864 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001877 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001897 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001912 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001927 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhtps\" (UniqueName: \"kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001941 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001955 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001968 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001982 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.001997 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwxw5\" (UniqueName: \"kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002011 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002025 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glvxs\" (UniqueName: \"kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002039 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002053 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/25770358-1610-4bfe-bff7-4acf81e687e8-available-featuregates\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002066 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8d52\" (UniqueName: \"kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002082 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-auth-proxy-config\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002096 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") 
" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002110 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002125 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002139 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002154 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002167 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002180 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002194 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002216 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2351e3a8-bdfe-4a50-b234-fe4a84b82169-machine-approver-tls\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002229 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002243 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.002257 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.003357 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"etcd-client": failed to list *v1.Secret: secrets "etcd-client" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.003391 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"etcd-client\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"etcd-client\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.003446 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"encryption-config-1": failed to list *v1.Secret: secrets "encryption-config-1" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.003461 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"encryption-config-1\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.003506 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"audit-1": failed to list *v1.ConfigMap: configmaps "audit-1" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.003522 4675 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-oauth-apiserver\"/\"audit-1\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit-1\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.005811 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-cliconfig": failed to list *v1.ConfigMap: configmaps "v4-0-config-system-cliconfig" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.005856 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"v4-0-config-system-cliconfig\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.005901 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-user-template-error": failed to list *v1.Secret: secrets "v4-0-config-user-template-error" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.005906 4675 reflector.go:561] object-"openshift-apiserver"/"image-import-ca": failed to list *v1.ConfigMap: configmaps "image-import-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.005912 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-user-template-error\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.005923 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"image-import-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"image-import-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.005941 4675 reflector.go:561] object-"openshift-apiserver"/"etcd-serving-ca": failed to list *v1.ConfigMap: configmaps "etcd-serving-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.005951 4675 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-apiserver\"/\"etcd-serving-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"etcd-serving-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.005982 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006011 4675 reflector.go:561] object-"openshift-apiserver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006022 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.006067 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006127 4675 reflector.go:561] object-"openshift-apiserver"/"etcd-client": failed to list *v1.Secret: secrets "etcd-client" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006141 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"etcd-client\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"etcd-client\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006184 4675 reflector.go:561] object-"openshift-authentication-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006189 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006196 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" 
cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006206 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006229 4675 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj": failed to list *v1.Secret: secrets "authentication-operator-dockercfg-mz9bj" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006240 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-mz9bj\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"authentication-operator-dockercfg-mz9bj\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006257 4675 reflector.go:561] object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c": failed to list *v1.Secret: secrets "openshift-controller-manager-sa-dockercfg-msq4c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006272 4675 reflector.go:561] object-"openshift-authentication-operator"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006271 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-msq4c\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-sa-dockercfg-msq4c\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006284 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc 
kubenswrapper[4675]: W1125 12:29:58.005985 4675 reflector.go:561] object-"openshift-apiserver"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006301 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006342 4675 reflector.go:561] object-"openshift-apiserver"/"encryption-config-1": failed to list *v1.Secret: secrets "encryption-config-1" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006352 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"encryption-config-1\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"encryption-config-1\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006343 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-service-ca": failed to list *v1.ConfigMap: configmaps "v4-0-config-system-service-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006367 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"v4-0-config-system-service-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006379 4675 reflector.go:561] object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff": failed to list *v1.Secret: secrets "openshift-apiserver-sa-dockercfg-djjff" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006397 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-djjff\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-sa-dockercfg-djjff\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc 
kubenswrapper[4675]: W1125 12:29:58.006419 4675 reflector.go:561] object-"openshift-authentication"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006429 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006441 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template": failed to list *v1.Secret: secrets "v4-0-config-system-ocp-branding-template" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006454 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-system-ocp-branding-template\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006462 4675 reflector.go:561] object-"openshift-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006471 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006495 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006510 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" 
Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006500 4675 reflector.go:561] object-"openshift-machine-api"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006531 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006521 4675 reflector.go:561] object-"openshift-authentication"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006549 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006542 4675 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-tls": failed to list *v1.Secret: secrets "machine-api-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006562 4675 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7": failed to list *v1.Secret: secrets "machine-api-operator-dockercfg-mfbb7" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006568 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-api-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006572 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-mfbb7\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-api-operator-dockercfg-mfbb7\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 
12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006603 4675 reflector.go:561] object-"openshift-authentication-operator"/"service-ca-bundle": failed to list *v1.ConfigMap: configmaps "service-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006617 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"service-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"service-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006646 4675 reflector.go:561] object-"openshift-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006657 4675 reflector.go:561] object-"openshift-controller-manager"/"openshift-global-ca": failed to list *v1.ConfigMap: configmaps "openshift-global-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006661 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006668 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-global-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-global-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006701 4675 reflector.go:561] object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006711 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace 
\"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006703 4675 reflector.go:561] object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z": failed to list *v1.Secret: secrets "openshift-config-operator-dockercfg-7pc5z" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006726 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-7pc5z\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-config-operator-dockercfg-7pc5z\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006733 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-router-certs": failed to list *v1.Secret: secrets "v4-0-config-system-router-certs" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006747 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-system-router-certs\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006758 4675 reflector.go:561] object-"openshift-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006767 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006792 4675 reflector.go:561] object-"openshift-config-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006805 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User 
\"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006796 4675 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert": failed to list *v1.Secret: secrets "openshift-apiserver-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006840 4675 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-images": failed to list *v1.ConfigMap: configmaps "machine-api-operator-images" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006845 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006851 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-images\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"machine-api-operator-images\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006889 4675 reflector.go:561] object-"openshift-apiserver-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006900 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006933 4675 reflector.go:561] object-"openshift-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006943 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\": Failed to watch 
*v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.006984 4675 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.006994 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007028 4675 reflector.go:561] object-"openshift-machine-api"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007038 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007064 4675 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-apiserver-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007074 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-apiserver-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007110 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "v4-0-config-system-trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007119 4675 
reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"v4-0-config-system-trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007175 4675 reflector.go:561] object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq": failed to list *v1.Secret: secrets "oauth-apiserver-sa-dockercfg-6r2bq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007187 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-6r2bq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"oauth-apiserver-sa-dockercfg-6r2bq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007271 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007325 4675 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-config": failed to list *v1.ConfigMap: configmaps "authentication-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007336 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"authentication-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007349 4675 reflector.go:561] object-"openshift-apiserver"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007353 4675 reflector.go:561] object-"openshift-apiserver"/"audit-1": failed to list *v1.ConfigMap: configmaps "audit-1" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007373 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"audit-1\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit-1\" is forbidden: User \"system:node:crc\" cannot list resource 
\"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007358 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007423 4675 reflector.go:561] object-"openshift-authentication"/"audit": failed to list *v1.ConfigMap: configmaps "audit" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007434 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"audit\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007472 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007489 4675 reflector.go:561] object-"openshift-controller-manager"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007503 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007539 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007598 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007539 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data": failed to list *v1.Secret: secrets "v4-0-config-user-idp-0-file-data" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007632 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-user-idp-0-file-data\" is 
forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007656 4675 reflector.go:561] object-"openshift-machine-api"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007665 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007714 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007761 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-user-template-provider-selection": failed to list *v1.Secret: secrets "v4-0-config-user-template-provider-selection" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007775 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-user-template-provider-selection\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007805 4675 reflector.go:561] object-"openshift-apiserver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007832 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007844 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.007888 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 12:29:58 
crc kubenswrapper[4675]: I1125 12:29:58.007906 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007715 4675 reflector.go:561] object-"openshift-authentication-operator"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007958 4675 reflector.go:561] object-"openshift-config-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007967 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007971 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.007979 4675 reflector.go:561] object-"openshift-config-operator"/"config-operator-serving-cert": failed to list *v1.Secret: secrets "config-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.007989 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"config-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"config-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.008075 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.008088 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.009842 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-system-session": failed to list *v1.Secret: secrets "v4-0-config-system-session" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace 
"openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.009884 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-session\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-system-session\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.014417 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wh2qp"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.014470 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf"] Nov 25 12:29:58 crc kubenswrapper[4675]: W1125 12:29:58.020070 4675 reflector.go:561] object-"openshift-authentication"/"v4-0-config-user-template-login": failed to list *v1.Secret: secrets "v4-0-config-user-template-login" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication": no relationship found between node 'crc' and this object Nov 25 12:29:58 crc kubenswrapper[4675]: E1125 12:29:58.020149 4675 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"v4-0-config-user-template-login\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.028504 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-9ftpx"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.029127 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.035666 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.050417 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.050714 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.053104 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.053121 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.076147 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.078670 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.078785 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.079208 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.081905 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-f78nf"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.082026 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.082276 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.082301 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.083441 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.083573 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.084248 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.084267 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.084251 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.084764 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.085345 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5sbb6"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.085733 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.086066 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.086767 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.091051 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.091470 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fmvk5"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.091491 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.091568 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.092158 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.092631 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.092773 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.092978 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.093086 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.093200 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.093318 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.098543 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099016 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099218 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099508 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099641 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099768 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.099902 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc 
kubenswrapper[4675]: I1125 12:29:58.100022 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.100134 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.101202 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.101330 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.101428 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.101460 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.101952 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102050 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102593 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102628 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102649 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102665 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102690 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-dir\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102705 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102721 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102739 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102755 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102770 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdd6k\" (UniqueName: \"kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102786 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102800 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102847 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102868 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: 
\"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102892 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102912 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-config\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102932 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102956 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102972 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.102987 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103008 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-config\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103024 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjqdf\" (UniqueName: \"kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc 
kubenswrapper[4675]: I1125 12:29:58.103040 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103062 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103086 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103105 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103121 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhtps\" (UniqueName: \"kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103136 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103155 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103174 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103195 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103215 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103236 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwxw5\" (UniqueName: \"kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103254 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103269 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glvxs\" (UniqueName: \"kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103285 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8d52\" (UniqueName: \"kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103302 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103319 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/25770358-1610-4bfe-bff7-4acf81e687e8-available-featuregates\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103335 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-auth-proxy-config\") pod 
\"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103350 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103366 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103382 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103398 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103414 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103432 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103449 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103464 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 
12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103481 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2351e3a8-bdfe-4a50-b234-fe4a84b82169-machine-approver-tls\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103494 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103517 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103532 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103550 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103564 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-node-pullsecrets\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103577 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6xdx\" (UniqueName: \"kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103594 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82nss\" (UniqueName: \"kubernetes.io/projected/970223cc-6160-49e3-be97-991ce6b00a50-kube-api-access-82nss\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103623 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config\") pod 
\"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103638 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103654 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lx8k\" (UniqueName: \"kubernetes.io/projected/c2202113-3df6-462c-b245-1407533fa7ca-kube-api-access-5lx8k\") pod \"cluster-samples-operator-665b6dd947-5582k\" (UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103669 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-trusted-ca\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103684 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2202113-3df6-462c-b245-1407533fa7ca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5582k\" (UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103712 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103729 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103746 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103761 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103776 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xftds\" (UniqueName: \"kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103791 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.107475 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-auth-proxy-config\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103412 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-dir\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.107670 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.107713 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.107895 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-node-pullsecrets\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.103806 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-audit-dir\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.108027 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/970223cc-6160-49e3-be97-991ce6b00a50-serving-cert\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.108173 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2351e3a8-bdfe-4a50-b234-fe4a84b82169-config\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.109523 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/25770358-1610-4bfe-bff7-4acf81e687e8-available-featuregates\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.109543 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.109723 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62f92a86-663f-4101-9429-ffcd900bef67-audit-dir\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.113326 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.114779 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.117804 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.121350 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.121906 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.124945 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2351e3a8-bdfe-4a50-b234-fe4a84b82169-machine-approver-tls\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.126876 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.127465 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.128099 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.128575 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-fmkdh"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.128956 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.129067 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.131781 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.131866 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.132612 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.132938 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.147201 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.147722 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.148043 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.150753 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.152234 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.157534 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.159285 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.167526 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.167930 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-682hw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.169039 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-ztlr8"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.169347 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.169710 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-86gc4"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.171938 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.172222 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.173344 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.174101 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.174279 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.174585 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mqfhd"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.174925 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.175127 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-86gc4" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.175252 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.180371 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.185776 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.186479 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.191453 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vmnz8"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.192246 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.192720 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.192720 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.198281 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bwvrd"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.199041 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.200010 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.200177 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.200658 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-9ftpx"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.200725 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209400 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/970223cc-6160-49e3-be97-991ce6b00a50-serving-cert\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209445 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-node-bootstrap-token\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209480 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n7vk\" (UniqueName: \"kubernetes.io/projected/16a1feb6-e940-463b-b937-0b891e10e8fc-kube-api-access-2n7vk\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209543 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/16a1feb6-e940-463b-b937-0b891e10e8fc-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209570 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqp8c\" (UniqueName: \"kubernetes.io/projected/bfcc8e71-5649-48cd-a9bf-5105629e6d87-kube-api-access-sqp8c\") pod \"migrator-59844c95c7-qsjxt\" (UID: \"bfcc8e71-5649-48cd-a9bf-5105629e6d87\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209644 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmd5p\" (UniqueName: \"kubernetes.io/projected/3f473477-4303-423d-842d-9d26d5715d78-kube-api-access-mmd5p\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209685 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-certs\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209716 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-config\") pod \"console-operator-58897d9998-9ftpx\" (UID: 
\"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.209970 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82nss\" (UniqueName: \"kubernetes.io/projected/970223cc-6160-49e3-be97-991ce6b00a50-kube-api-access-82nss\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.210041 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lx8k\" (UniqueName: \"kubernetes.io/projected/c2202113-3df6-462c-b245-1407533fa7ca-kube-api-access-5lx8k\") pod \"cluster-samples-operator-665b6dd947-5582k\" (UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.210082 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-trusted-ca\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.210105 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2202113-3df6-462c-b245-1407533fa7ca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5582k\" (UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.211491 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-config\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.215224 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/970223cc-6160-49e3-be97-991ce6b00a50-serving-cert\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.215743 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/970223cc-6160-49e3-be97-991ce6b00a50-trusted-ca\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.217920 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.224541 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2202113-3df6-462c-b245-1407533fa7ca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5582k\" 
(UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.228896 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-j2gbr"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.228944 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.229966 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.232884 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-f78nf"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.232949 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5sbb6"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.235581 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.235624 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.235833 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.237695 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.237736 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.239186 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.239954 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.243704 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7dbvr"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.244846 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.245214 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.246419 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.248616 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-86gc4"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.250990 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.254259 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.254681 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.257174 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.257771 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.258800 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.274576 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.275782 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.276929 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.281527 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.282582 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.284115 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.284671 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vmnz8"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.287562 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bwvrd"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.287599 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mqfhd"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 
12:29:58.288604 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.289738 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7dbvr"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.291091 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-682hw"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.292457 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-6579r"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.293529 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6579r"] Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.294025 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.294135 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310707 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-node-bootstrap-token\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310758 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n7vk\" (UniqueName: \"kubernetes.io/projected/16a1feb6-e940-463b-b937-0b891e10e8fc-kube-api-access-2n7vk\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310837 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/16a1feb6-e940-463b-b937-0b891e10e8fc-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310861 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqp8c\" (UniqueName: \"kubernetes.io/projected/bfcc8e71-5649-48cd-a9bf-5105629e6d87-kube-api-access-sqp8c\") pod \"migrator-59844c95c7-qsjxt\" (UID: \"bfcc8e71-5649-48cd-a9bf-5105629e6d87\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310929 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-certs\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.310953 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-mmd5p\" (UniqueName: \"kubernetes.io/projected/3f473477-4303-423d-842d-9d26d5715d78-kube-api-access-mmd5p\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.339733 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.355444 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.374702 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.395050 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.414688 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.574694 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.649178 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xftds\" (UniqueName: \"kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds\") pod \"route-controller-manager-6576b87f9c-mpxxs\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.654578 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.675576 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.695068 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.715141 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.735335 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.755097 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.765523 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-node-bootstrap-token\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " 
pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.775248 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.785458 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3f473477-4303-423d-842d-9d26d5715d78-certs\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.795490 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.815223 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.835473 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.855761 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.870305 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.876187 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.895038 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.906657 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/16a1feb6-e940-463b-b937-0b891e10e8fc-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.916604 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.936032 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.954993 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.983146 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 12:29:58 crc kubenswrapper[4675]: I1125 12:29:58.997991 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 12:29:59 crc 
kubenswrapper[4675]: I1125 12:29:59.014775 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.035440 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.057044 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.065890 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"] Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.074641 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.094844 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103561 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103614 4675 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103699 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603631924 +0000 UTC m=+144.775224265 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103640 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103646 4675 secret.go:188] Couldn't get secret openshift-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103661 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103660 4675 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103676 4675 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103859 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config podName:a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.60384053 +0000 UTC m=+144.775432871 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config") pod "openshift-apiserver-operator-796bbdcf4f-njjzf" (UID: "a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103877 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603869531 +0000 UTC m=+144.775461942 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103899 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603892521 +0000 UTC m=+144.775484932 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103911 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603906302 +0000 UTC m=+144.775498733 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103922 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603917272 +0000 UTC m=+144.775509693 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.103941 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.603934703 +0000 UTC m=+144.775527124 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107370 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107443 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607427697 +0000 UTC m=+144.779020038 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107546 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107594 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607585262 +0000 UTC m=+144.779177603 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107614 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107633 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607628393 +0000 UTC m=+144.779220734 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107637 4675 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107673 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607661994 +0000 UTC m=+144.779254395 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107697 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/openshift-global-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107717 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607709786 +0000 UTC m=+144.779302127 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107736 4675 configmap.go:193] Couldn't get configMap openshift-authentication/audit: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107754 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607749007 +0000 UTC m=+144.779341348 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107804 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107904 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607893602 +0000 UTC m=+144.779485933 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107929 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107947 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. 
No retries permitted until 2025-11-25 12:29:59.607941693 +0000 UTC m=+144.779534034 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107959 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.107986 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.607971864 +0000 UTC m=+144.779564205 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108003 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108022 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608014646 +0000 UTC m=+144.779606987 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108032 4675 secret.go:188] Couldn't get secret openshift-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108048 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608044187 +0000 UTC m=+144.779636528 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108067 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108082 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608077408 +0000 UTC m=+144.779669749 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108104 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108119 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608115479 +0000 UTC m=+144.779707820 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108132 4675 secret.go:188] Couldn't get secret openshift-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108152 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.60814368 +0000 UTC m=+144.779736021 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108167 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108184 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. 
No retries permitted until 2025-11-25 12:29:59.608179771 +0000 UTC m=+144.779772112 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108195 4675 secret.go:188] Couldn't get secret openshift-oauth-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108211 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608206392 +0000 UTC m=+144.779798733 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108224 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108238 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-session: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108257 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608252224 +0000 UTC m=+144.779844565 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108296 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108302 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608292185 +0000 UTC m=+144.779884526 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108317 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608311846 +0000 UTC m=+144.779904187 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108324 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108331 4675 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108349 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert podName:a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608344917 +0000 UTC m=+144.779937258 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-njjzf" (UID: "a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108369 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608362947 +0000 UTC m=+144.779955288 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108370 4675 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108391 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108397 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608392328 +0000 UTC m=+144.779984669 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108377 4675 secret.go:188] Couldn't get secret openshift-authentication-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108409 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608404119 +0000 UTC m=+144.779996460 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108490 4675 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108522 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608515362 +0000 UTC m=+144.780107703 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108442 4675 secret.go:188] Couldn't get secret openshift-oauth-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108537 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608529893 +0000 UTC m=+144.780122234 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108550 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608545443 +0000 UTC m=+144.780137774 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108451 4675 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108621 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608615066 +0000 UTC m=+144.780207407 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108458 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108644 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608639516 +0000 UTC m=+144.780231857 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108465 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.108686 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.608679108 +0000 UTC m=+144.780271449 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.109783 4675 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.109915 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.609868606 +0000 UTC m=+144.781460947 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.109950 4675 secret.go:188] Couldn't get secret openshift-config-operator/config-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.109983 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert podName:25770358-1610-4bfe-bff7-4acf81e687e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.609975019 +0000 UTC m=+144.781567350 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert") pod "openshift-config-operator-7777fb866f-rgkhp" (UID: "25770358-1610-4bfe-bff7-4acf81e687e8") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.110045 4675 secret.go:188] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.110089 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.610062312 +0000 UTC m=+144.781654653 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.116191 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.116263 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:29:59.616245133 +0000 UTC m=+144.787837524 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.116574 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.134116 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.155032 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.173131 4675 request.go:700] Waited for 1.000710335s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/configmaps?fieldSelector=metadata.name%3Dcollect-profiles-config&limit=500&resourceVersion=0 Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.174312 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.179596 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" event={"ID":"a4f386bf-2d54-4294-9a96-143e59a150ed","Type":"ContainerStarted","Data":"9e7480a36376e7b7b0798cc431b2d8e7b6afbf1a44352defb4d0f04c54720279"} Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.179647 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" event={"ID":"a4f386bf-2d54-4294-9a96-143e59a150ed","Type":"ContainerStarted","Data":"fc57b4bd59d926944caabd2c45da75340d753281c766391afe628bae7c05ca58"} Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.180064 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.181785 4675 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-mpxxs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.181831 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.194746 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.215394 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 12:29:59 crc 
kubenswrapper[4675]: I1125 12:29:59.234856 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.255902 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.275539 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.294853 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.314593 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.334868 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.355678 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.375036 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.395100 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.417421 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.434853 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.446781 4675 projected.go:288] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.455242 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.465025 4675 projected.go:288] Couldn't get configMap openshift-authentication/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.475664 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.487325 4675 projected.go:288] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.495449 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.505984 4675 projected.go:288] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.515108 4675 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.524801 4675 projected.go:288] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.535419 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.545235 4675 projected.go:288] Couldn't get configMap openshift-cluster-machine-approver/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.545330 4675 projected.go:194] Error preparing data for projected volume kube-api-access-glvxs for pod openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.545403 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs podName:2351e3a8-bdfe-4a50-b234-fe4a84b82169 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:00.045382801 +0000 UTC m=+145.216975152 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-glvxs" (UniqueName: "kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs") pod "machine-approver-56656f9798-zkcp6" (UID: "2351e3a8-bdfe-4a50-b234-fe4a84b82169") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.555638 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.565721 4675 projected.go:288] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.575531 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.597875 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.607381 4675 projected.go:288] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.616431 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.625291 4675 projected.go:288] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.625316 4675 projected.go:194] Error preparing data for projected volume kube-api-access-nwxw5 for pod openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: E1125 12:29:59.625376 4675 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5 podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:30:00.12535758 +0000 UTC m=+145.296949931 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-nwxw5" (UniqueName: "kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629451 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629491 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629516 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629553 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629571 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629586 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 
12:29:59.629698 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629731 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629810 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629877 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629925 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.629952 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630102 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630135 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630215 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630273 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630390 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630441 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630469 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630571 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630602 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630624 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630644 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: 
\"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630666 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630703 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630728 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630749 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630771 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630862 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630898 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630940 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630963 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" 
(UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.630987 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631008 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631047 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631071 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631094 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631125 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.631147 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.634763 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.654506 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 
12:29:59.675490 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.695404 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.736120 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.755023 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.776115 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.795065 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.815070 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.835143 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.854871 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.875322 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.895048 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.915507 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.935805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.954923 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.975371 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 12:29:59 crc kubenswrapper[4675]: I1125 12:29:59.995612 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.014719 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.036245 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.054730 4675 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.093136 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82nss\" (UniqueName: \"kubernetes.io/projected/970223cc-6160-49e3-be97-991ce6b00a50-kube-api-access-82nss\") pod \"console-operator-58897d9998-9ftpx\" (UID: \"970223cc-6160-49e3-be97-991ce6b00a50\") " pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.111077 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lx8k\" (UniqueName: \"kubernetes.io/projected/c2202113-3df6-462c-b245-1407533fa7ca-kube-api-access-5lx8k\") pod \"cluster-samples-operator-665b6dd947-5582k\" (UID: \"c2202113-3df6-462c-b245-1407533fa7ca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.118880 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.126425 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft"] Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.126925 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config-volume kube-api-access-zlbdr secret-volume], unattached volumes=[], failed to process volumes=[config-volume kube-api-access-zlbdr secret-volume]: context canceled" pod="openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft" podUID="ba36f8b8-3b33-4302-9f4e-8f1b34a901ff" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.137436 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"] Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.138195 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.138309 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwxw5\" (UniqueName: \"kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.138354 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glvxs\" (UniqueName: \"kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.139450 4675 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.150019 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"] Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.158292 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.161563 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.174059 4675 request.go:700] Waited for 1.879697962s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/secrets?fieldSelector=metadata.name%3Ddefault-dockercfg-2llfx&limit=500&resourceVersion=0 Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.177705 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.179091 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.183753 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.189891 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.196578 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.209743 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.215802 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.236022 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.279717 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmd5p\" (UniqueName: \"kubernetes.io/projected/3f473477-4303-423d-842d-9d26d5715d78-kube-api-access-mmd5p\") pod \"machine-config-server-fmkdh\" (UID: \"3f473477-4303-423d-842d-9d26d5715d78\") " pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.301726 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqp8c\" (UniqueName: \"kubernetes.io/projected/bfcc8e71-5649-48cd-a9bf-5105629e6d87-kube-api-access-sqp8c\") pod \"migrator-59844c95c7-qsjxt\" (UID: \"bfcc8e71-5649-48cd-a9bf-5105629e6d87\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.312665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n7vk\" (UniqueName: \"kubernetes.io/projected/16a1feb6-e940-463b-b937-0b891e10e8fc-kube-api-access-2n7vk\") pod \"package-server-manager-789f6589d5-8vh6n\" (UID: \"16a1feb6-e940-463b-b937-0b891e10e8fc\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.332873 
4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-9ftpx"] Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.335435 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.352893 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.354575 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.375017 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.384366 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.385893 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-fmkdh" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.396042 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.403048 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.404893 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k"] Nov 25 12:30:00 crc kubenswrapper[4675]: W1125 12:30:00.406074 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f473477_4303_423d_842d_9d26d5715d78.slice/crio-79802daa5c2884896bbee2dc06a367b3ffd8f1a6bec1fe65de41ef2a78946874 WatchSource:0}: Error finding container 79802daa5c2884896bbee2dc06a367b3ffd8f1a6bec1fe65de41ef2a78946874: Status 404 returned error can't find the container with id 79802daa5c2884896bbee2dc06a367b3ffd8f1a6bec1fe65de41ef2a78946874 Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.406091 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.416108 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.425507 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.435927 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.448113 4675 projected.go:288] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.448674 4675 projected.go:194] Error preparing data for projected volume kube-api-access-rdd6k for pod openshift-authentication-operator/authentication-operator-69f744f599-fmvk5: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.448765 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:30:00.948739732 +0000 UTC m=+146.120332273 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rdd6k" (UniqueName: "kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.448939 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-encryption-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.459712 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.465174 4675 projected.go:288] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.465205 4675 projected.go:194] Error preparing data for projected volume kube-api-access-9ssch for pod openshift-authentication/oauth-openshift-558db77b4-qb49w: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.465286 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:00.965259798 +0000 UTC m=+146.136852139 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-9ssch" (UniqueName: "kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.468430 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-serving-cert\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.477307 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.484714 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.488017 4675 projected.go:288] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.488054 4675 projected.go:194] Error preparing data for projected volume kube-api-access-bjqdf for pod openshift-machine-api/machine-api-operator-5694c8668f-wh2qp: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.488124 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:00.988102141 +0000 UTC m=+146.159694482 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bjqdf" (UniqueName: "kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.494584 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.504626 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-encryption-config\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.506463 4675 projected.go:288] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.506498 4675 projected.go:194] Error preparing data for projected volume kube-api-access-jhtps for pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.506554 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps podName:a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.00653406 +0000 UTC m=+146.178126401 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-jhtps" (UniqueName: "kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps") pod "openshift-apiserver-operator-796bbdcf4f-njjzf" (UID: "a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.520716 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.525369 4675 projected.go:288] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.525419 4675 projected.go:194] Error preparing data for projected volume kube-api-access-7jsz5 for pod openshift-controller-manager/controller-manager-879f6c89f-zpfvq: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.525509 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5 podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.025471926 +0000 UTC m=+146.197064267 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-7jsz5" (UniqueName: "kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.532164 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.535796 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.558371 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.562697 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.566968 4675 projected.go:288] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.566996 4675 projected.go:194] Error preparing data for projected volume kube-api-access-t8d52 for pod openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.567179 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52 podName:25770358-1610-4bfe-bff7-4acf81e687e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.067069108 +0000 UTC m=+146.238661449 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-t8d52" (UniqueName: "kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52") pod "openshift-config-operator-7777fb866f-rgkhp" (UID: "25770358-1610-4bfe-bff7-4acf81e687e8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.576184 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.587426 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt"] Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.594981 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.602532 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-etcd-serving-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.608250 4675 projected.go:288] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.608280 4675 projected.go:194] Error preparing data for projected volume kube-api-access-s6xdx for pod openshift-apiserver/apiserver-76f77b778f-j2gbr: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.608344 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.108325658 +0000 UTC m=+146.279917999 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s6xdx" (UniqueName: "kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: W1125 12:30:00.610352 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfcc8e71_5649_48cd_a9bf_5105629e6d87.slice/crio-ae9e7df79f5f151426f7c664fa6eb0c49ba0bed9a4ca0aa0c2426124ab88d175 WatchSource:0}: Error finding container ae9e7df79f5f151426f7c664fa6eb0c49ba0bed9a4ca0aa0c2426124ab88d175: Status 404 returned error can't find the container with id ae9e7df79f5f151426f7c664fa6eb0c49ba0bed9a4ca0aa0c2426124ab88d175 Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.616512 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.620928 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630320 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630355 4675 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630397 4675 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630422 4675 secret.go:188] Couldn't get secret openshift-oauth-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630427 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630401016 +0000 UTC m=+146.801993357 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630469 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630482 4675 secret.go:188] Couldn't get secret openshift-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630468 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630528 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630546 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630567 4675 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630491 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630435327 +0000 UTC m=+146.802027668 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630609 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630595292 +0000 UTC m=+146.802187633 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630609 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/openshift-global-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630659 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630685 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630633 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630624303 +0000 UTC m=+146.802216864 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630635 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630712 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630664 4675 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630718 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630708196 +0000 UTC m=+146.802300537 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630784 4675 secret.go:188] Couldn't get secret openshift-authentication-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630791 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. 
No retries permitted until 2025-11-25 12:30:01.630781808 +0000 UTC m=+146.802374319 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630824 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-session: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630833 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630803849 +0000 UTC m=+146.802396180 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630854 4675 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630875 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.63084335 +0000 UTC m=+146.802435691 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630895 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630888852 +0000 UTC m=+146.802481193 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630877 4675 configmap.go:193] Couldn't get configMap openshift-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630905 4675 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630945 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. 
No retries permitted until 2025-11-25 12:30:01.630903682 +0000 UTC m=+146.802496243 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630969 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630957744 +0000 UTC m=+146.802550085 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630983 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630977305 +0000 UTC m=+146.802569646 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.630999 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.630990865 +0000 UTC m=+146.802583256 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631020 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631014256 +0000 UTC m=+146.802606597 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631038 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631028896 +0000 UTC m=+146.802621417 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631053 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert podName:a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631046497 +0000 UTC m=+146.802638838 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-njjzf" (UID: "a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631037 4675 secret.go:188] Couldn't get secret openshift-oauth-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631116 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631073 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631063597 +0000 UTC m=+146.802656149 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631229 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631198532 +0000 UTC m=+146.802791043 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631270 4675 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631284 4675 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631240 4675 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631343 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631313346 +0000 UTC m=+146.802905887 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631366 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631358007 +0000 UTC m=+146.802950348 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631384 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631377418 +0000 UTC m=+146.802969759 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631428 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config podName:962d971d-f0de-4d22-a854-e4a65644b9b8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631398668 +0000 UTC m=+146.802991249 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config") pod "machine-api-operator-5694c8668f-wh2qp" (UID: "962d971d-f0de-4d22-a854-e4a65644b9b8") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631450 4675 configmap.go:193] Couldn't get configMap openshift-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631481 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631472901 +0000 UTC m=+146.803065452 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631504 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631494212 +0000 UTC m=+146.803086773 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631510 4675 secret.go:188] Couldn't get secret openshift-config-operator/config-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631523 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig podName:db01a316-423a-4238-8a5b-9839aaac33ff nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631514142 +0000 UTC m=+146.803106693 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig") pod "oauth-openshift-558db77b4-qb49w" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631552 4675 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631566 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit podName:62f92a86-663f-4101-9429-ffcd900bef67 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631555634 +0000 UTC m=+146.803148155 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit") pod "apiserver-76f77b778f-j2gbr" (UID: "62f92a86-663f-4101-9429-ffcd900bef67") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631607 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config podName:9dfcfcb3-6d81-425b-98ae-925d3fbf2369 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631599635 +0000 UTC m=+146.803191976 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config") pod "controller-manager-879f6c89f-zpfvq" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631618 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert podName:25770358-1610-4bfe-bff7-4acf81e687e8 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631612916 +0000 UTC m=+146.803205247 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert") pod "openshift-config-operator-7777fb866f-rgkhp" (UID: "25770358-1610-4bfe-bff7-4acf81e687e8") : failed to sync secret cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: E1125 12:30:00.631629 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config podName:6737ff6f-0804-452d-80b7-b8fc21a6419e nodeName:}" failed. No retries permitted until 2025-11-25 12:30:01.631624626 +0000 UTC m=+146.803216967 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config") pod "authentication-operator-69f744f599-fmvk5" (UID: "6737ff6f-0804-452d-80b7-b8fc21a6419e") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.634706 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.649514 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n"] Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.656791 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 12:30:00 crc kubenswrapper[4675]: W1125 12:30:00.658990 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16a1feb6_e940_463b_b937_0b891e10e8fc.slice/crio-ca7e9b84ac41a0aa4698380922209c5d1603c7f57e85358346d0b7929877043e WatchSource:0}: Error finding container ca7e9b84ac41a0aa4698380922209c5d1603c7f57e85358346d0b7929877043e: Status 404 returned error can't find the container with id ca7e9b84ac41a0aa4698380922209c5d1603c7f57e85358346d0b7929877043e Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.675791 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.707629 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.715200 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.735144 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.756012 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.776897 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.797965 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.816499 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.849081 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.855582 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.875831 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.895642 4675 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.915734 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.935671 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.949364 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdd6k\" (UniqueName: \"kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.954989 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.975807 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 12:30:00 crc kubenswrapper[4675]: I1125 12:30:00.996045 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.015744 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.050295 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.050343 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjqdf\" (UniqueName: \"kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.050386 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhtps\" (UniqueName: \"kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.050404 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.051866 4675 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.056010 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.056803 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.057497 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhtps\" (UniqueName: \"kubernetes.io/projected/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-kube-api-access-jhtps\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.057702 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjqdf\" (UniqueName: \"kubernetes.io/projected/962d971d-f0de-4d22-a854-e4a65644b9b8-kube-api-access-bjqdf\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.081968 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.095245 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.115278 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.135792 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.139433 4675 projected.go:288] Couldn't get configMap openshift-cluster-machine-approver/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.139635 4675 projected.go:194] Error preparing data for projected volume kube-api-access-glvxs for pod openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.139793 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs podName:2351e3a8-bdfe-4a50-b234-fe4a84b82169 nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.139767931 +0000 UTC m=+147.311360272 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-glvxs" (UniqueName: "kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs") pod "machine-approver-56656f9798-zkcp6" (UID: "2351e3a8-bdfe-4a50-b234-fe4a84b82169") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.139470 4675 projected.go:288] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.139988 4675 projected.go:194] Error preparing data for projected volume kube-api-access-nwxw5 for pod openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8: failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.140134 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5 podName:4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.140121293 +0000 UTC m=+147.311713634 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-nwxw5" (UniqueName: "kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5") pod "apiserver-7bbb656c7d-9n5x8" (UID: "4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa") : failed to sync configmap cache: timed out waiting for the condition Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.151560 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8d52\" (UniqueName: \"kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.151709 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6xdx\" (UniqueName: \"kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.155263 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8d52\" (UniqueName: \"kubernetes.io/projected/25770358-1610-4bfe-bff7-4acf81e687e8-kube-api-access-t8d52\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.155962 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.176043 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.190095 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" 
event={"ID":"c2202113-3df6-462c-b245-1407533fa7ca","Type":"ContainerStarted","Data":"d90d4bb804df62bfe60a29d3b70e45ea527660430129172a87b20bc65a778b26"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.190145 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" event={"ID":"c2202113-3df6-462c-b245-1407533fa7ca","Type":"ContainerStarted","Data":"06222ed2eab5e80fc2bca77b27113ffd41b7920d685517d4196af748620c0069"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.190161 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" event={"ID":"c2202113-3df6-462c-b245-1407533fa7ca","Type":"ContainerStarted","Data":"95e1bf3f5ba3f07466507ef89d050604c2157b621dbddb6b4b63d51da34297c8"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.191602 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" event={"ID":"16a1feb6-e940-463b-b937-0b891e10e8fc","Type":"ContainerStarted","Data":"da49cf71a8b4167fd698fd951d337804317906a9aa1b91bded74c4098cfd78af"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.191658 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" event={"ID":"16a1feb6-e940-463b-b937-0b891e10e8fc","Type":"ContainerStarted","Data":"2dd3b5d2f1314662c9e1ba0ffbe8e49a8bcdaf3e1527d638d38133d30e9127e5"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.191671 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" event={"ID":"16a1feb6-e940-463b-b937-0b891e10e8fc","Type":"ContainerStarted","Data":"ca7e9b84ac41a0aa4698380922209c5d1603c7f57e85358346d0b7929877043e"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.191751 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.192928 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" event={"ID":"970223cc-6160-49e3-be97-991ce6b00a50","Type":"ContainerStarted","Data":"dc62c0e6bf7167295c1c42589a9fd6b04110218dcb79f3dade3bf313aae1b82f"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.193311 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.193568 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" event={"ID":"970223cc-6160-49e3-be97-991ce6b00a50","Type":"ContainerStarted","Data":"36ce950b7b2e6f1aea7623abde5ce65bf0826a43ede64aff8b31eb6f33f0e95b"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.193177 4675 request.go:700] Waited for 1.895952191s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-api/configmaps?fieldSelector=metadata.name%3Dmachine-api-operator-images&limit=500&resourceVersion=0 Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.193898 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-fmkdh" 
event={"ID":"3f473477-4303-423d-842d-9d26d5715d78","Type":"ContainerStarted","Data":"e27ff6acb0d0a315bed3b48126448900a8ae2d13acb11eb2f188914477bb726d"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.193931 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-fmkdh" event={"ID":"3f473477-4303-423d-842d-9d26d5715d78","Type":"ContainerStarted","Data":"79802daa5c2884896bbee2dc06a367b3ffd8f1a6bec1fe65de41ef2a78946874"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.194938 4675 patch_prober.go:28] interesting pod/console-operator-58897d9998-9ftpx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.195009 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" podUID="970223cc-6160-49e3-be97-991ce6b00a50" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.196165 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" event={"ID":"bfcc8e71-5649-48cd-a9bf-5105629e6d87","Type":"ContainerStarted","Data":"866b284ce81b910e6462cecfe2255faa0491db2eef1607843b5a52572a346be2"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.196192 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" event={"ID":"bfcc8e71-5649-48cd-a9bf-5105629e6d87","Type":"ContainerStarted","Data":"f4a4b381ff961c324a1e2946691729ab2c11f1c2aecc45b5d2f5b5f2660c2391"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.196203 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" event={"ID":"bfcc8e71-5649-48cd-a9bf-5105629e6d87","Type":"ContainerStarted","Data":"ae9e7df79f5f151426f7c664fa6eb0c49ba0bed9a4ca0aa0c2426124ab88d175"} Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.196258 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.197961 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.229755 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.235516 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.255157 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.275682 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.294863 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.315925 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.336126 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.345722 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.355310 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.375502 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.394721 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.415173 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.435644 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.455688 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.477301 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.495226 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.516106 
4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.539243 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.546546 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6xdx\" (UniqueName: \"kubernetes.io/projected/62f92a86-663f-4101-9429-ffcd900bef67-kube-api-access-s6xdx\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.558392 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.575242 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.584270 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdd6k\" (UniqueName: \"kubernetes.io/projected/6737ff6f-0804-452d-80b7-b8fc21a6419e-kube-api-access-rdd6k\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.600194 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.658009 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.658118 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659166 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-image-import-ca\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.658148 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b12a716b-9585-42e4-88c8-aebd93d4f6de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659249 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659279 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659654 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b962c13b-37be-46d3-91ab-5fde6214ff03-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659784 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.659958 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660083 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660195 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660333 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9eb7832-4c59-478c-997b-d0037c9e0abf-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660469 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-service-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: 
\"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660577 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgldm\" (UniqueName: \"kubernetes.io/projected/e76ec096-df5f-4aa6-93d4-e2824b4dc454-kube-api-access-sgldm\") pod \"downloads-7954f5f757-f78nf\" (UID: \"e76ec096-df5f-4aa6-93d4-e2824b4dc454\") " pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660682 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660786 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.660913 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661024 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e248d293-d2aa-4bfc-a575-24529404e90c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661138 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661247 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661351 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlvcg\" (UniqueName: \"kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkt74\" (UniqueName: \"kubernetes.io/projected/ad85cac4-4f5d-4801-a73f-8c7c01806627-kube-api-access-fkt74\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661612 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661723 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661846 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661956 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662067 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-profile-collector-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662170 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2hsf\" (UniqueName: \"kubernetes.io/projected/4a8572cc-2631-4b88-8a65-7b93f16951cb-kube-api-access-q2hsf\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662281 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 
12:30:01.662390 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662488 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-apiservice-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662591 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bada3e55-0293-4982-a02e-06d685698bd9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662771 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662895 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-metrics-certs\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662981 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-audit\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662994 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-images\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663071 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663102 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-webhook-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663125 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663154 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663182 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663206 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663239 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663322 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663347 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/79030fb7-fcf1-4404-a300-e8c12becbb9b-tmpfs\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663389 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bada3e55-0293-4982-a02e-06d685698bd9-serving-cert\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.661196 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-config\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663426 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663484 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad85cac4-4f5d-4801-a73f-8c7c01806627-config-volume\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663518 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663548 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663573 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9eb7832-4c59-478c-997b-d0037c9e0abf-proxy-tls\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663596 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvdbm\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-kube-api-access-qvdbm\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663619 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-srv-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc 
kubenswrapper[4675]: I1125 12:30:01.663642 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663664 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663687 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9wtq\" (UniqueName: \"kubernetes.io/projected/bada3e55-0293-4982-a02e-06d685698bd9-kube-api-access-p9wtq\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663710 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b962c13b-37be-46d3-91ab-5fde6214ff03-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663733 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663757 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-default-certificate\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663780 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-service-ca-bundle\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.663807 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-proxy-tls\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664028 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-trusted-ca\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664052 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664075 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664099 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e248d293-d2aa-4bfc-a575-24529404e90c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664125 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz94x\" (UniqueName: \"kubernetes.io/projected/292203b1-555c-4331-90c7-f3a56ee042ba-kube-api-access-pz94x\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664155 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664179 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664189 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 
25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664240 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rhjg\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664297 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b12a716b-9585-42e4-88c8-aebd93d4f6de-config\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664333 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-serving-cert\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664358 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-config\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664386 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f47c7634-aeac-48cb-aeaa-dc06956728ff-config\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664412 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68bg4\" (UniqueName: \"kubernetes.io/projected/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-kube-api-access-68bg4\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664438 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad85cac4-4f5d-4801-a73f-8c7c01806627-metrics-tls\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664462 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-stats-auth\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664484 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-zcjtg\" (UniqueName: \"kubernetes.io/projected/f47c7634-aeac-48cb-aeaa-dc06956728ff-kube-api-access-zcjtg\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664513 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664540 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664567 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvlpj\" (UniqueName: \"kubernetes.io/projected/7a82f7da-19cc-4d67-8384-be35fad097ed-kube-api-access-bvlpj\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664590 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnwzm\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-kube-api-access-rnwzm\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664621 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664644 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-client\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664672 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e248d293-d2aa-4bfc-a575-24529404e90c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664696 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/292203b1-555c-4331-90c7-f3a56ee042ba-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664726 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664751 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664777 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7sgd\" (UniqueName: \"kubernetes.io/projected/79030fb7-fcf1-4404-a300-e8c12becbb9b-kube-api-access-f7sgd\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664800 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2qsg\" (UniqueName: \"kubernetes.io/projected/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-kube-api-access-n2qsg\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664843 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzv7s\" (UniqueName: \"kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664870 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664892 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664922 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664959 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ltkc\" (UniqueName: \"kubernetes.io/projected/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-kube-api-access-7ltkc\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.664985 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjlvt\" (UniqueName: \"kubernetes.io/projected/d9eb7832-4c59-478c-997b-d0037c9e0abf-kube-api-access-fjlvt\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665007 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f47c7634-aeac-48cb-aeaa-dc06956728ff-serving-cert\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665029 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665048 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-metrics-tls\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665071 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665093 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665118 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-metrics-tls\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665147 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b12a716b-9585-42e4-88c8-aebd93d4f6de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.665434 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.666083 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.666966 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.667497 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.668012 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.668231 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-config\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.668574 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-serving-ca\") pod 
\"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.669513 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.662886 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6737ff6f-0804-452d-80b7-b8fc21a6419e-serving-cert\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.670566 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-trusted-ca-bundle\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.674741 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6737ff6f-0804-452d-80b7-b8fc21a6419e-service-ca-bundle\") pod \"authentication-operator-69f744f599-fmvk5\" (UID: \"6737ff6f-0804-452d-80b7-b8fc21a6419e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.675601 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f92a86-663f-4101-9429-ffcd900bef67-config\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.675979 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/962d971d-f0de-4d22-a854-e4a65644b9b8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.677021 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-audit-policies\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.677107 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.177068714 +0000 UTC m=+147.348661265 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.677291 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.678202 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-etcd-client\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.678219 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"controller-manager-879f6c89f-zpfvq\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.679228 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.679476 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/62f92a86-663f-4101-9429-ffcd900bef67-etcd-client\") pod \"apiserver-76f77b778f-j2gbr\" (UID: \"62f92a86-663f-4101-9429-ffcd900bef67\") " pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.681421 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.681610 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-serving-cert\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.682986 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-njjzf\" (UID: \"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.683104 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25770358-1610-4bfe-bff7-4acf81e687e8-serving-cert\") pod \"openshift-config-operator-7777fb866f-rgkhp\" (UID: \"25770358-1610-4bfe-bff7-4acf81e687e8\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.684099 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/962d971d-f0de-4d22-a854-e4a65644b9b8-images\") pod \"machine-api-operator-5694c8668f-wh2qp\" (UID: \"962d971d-f0de-4d22-a854-e4a65644b9b8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.688313 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qb49w\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.765925 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.766091 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.266066316 +0000 UTC m=+147.437658657 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766377 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qn7d\" (UniqueName: \"kubernetes.io/projected/5c4e6d67-533f-47f9-b744-20efbb4c9df6-kube-api-access-4qn7d\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766504 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b12a716b-9585-42e4-88c8-aebd93d4f6de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766625 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-srv-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766726 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766805 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b12a716b-9585-42e4-88c8-aebd93d4f6de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.766961 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b962c13b-37be-46d3-91ab-5fde6214ff03-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767088 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-profile-collector-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: 
I1125 12:30:01.767275 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-csi-data-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767337 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0af2c49d-affd-4abb-8910-5d9c717f5b78-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767382 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9eb7832-4c59-478c-997b-d0037c9e0abf-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767407 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-socket-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767435 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-service-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767457 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgldm\" (UniqueName: \"kubernetes.io/projected/e76ec096-df5f-4aa6-93d4-e2824b4dc454-kube-api-access-sgldm\") pod \"downloads-7954f5f757-f78nf\" (UID: \"e76ec096-df5f-4aa6-93d4-e2824b4dc454\") " pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767479 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54711cfd-28d9-4161-90fb-a87a7a6255de-config\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767517 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767542 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767564 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e248d293-d2aa-4bfc-a575-24529404e90c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767586 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767609 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlvcg\" (UniqueName: \"kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767633 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkt74\" (UniqueName: \"kubernetes.io/projected/ad85cac4-4f5d-4801-a73f-8c7c01806627-kube-api-access-fkt74\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767655 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3551b47-6cfc-4240-bb74-bf7cc0600a96-cert\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767725 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767751 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-profile-collector-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767774 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2hsf\" (UniqueName: \"kubernetes.io/projected/4a8572cc-2631-4b88-8a65-7b93f16951cb-kube-api-access-q2hsf\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767801 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-plugins-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767849 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-apiservice-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767873 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bada3e55-0293-4982-a02e-06d685698bd9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767895 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767942 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-metrics-certs\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.767965 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-images\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768001 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-webhook-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768027 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54711cfd-28d9-4161-90fb-a87a7a6255de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768051 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768125 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxdvl\" (UniqueName: \"kubernetes.io/projected/0af2c49d-affd-4abb-8910-5d9c717f5b78-kube-api-access-pxdvl\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768163 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768186 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/79030fb7-fcf1-4404-a300-e8c12becbb9b-tmpfs\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768212 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bada3e55-0293-4982-a02e-06d685698bd9-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768248 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad85cac4-4f5d-4801-a73f-8c7c01806627-config-volume\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768280 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9eb7832-4c59-478c-997b-d0037c9e0abf-proxy-tls\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768303 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvdbm\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-kube-api-access-qvdbm\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768325 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-srv-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768348 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768354 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9eb7832-4c59-478c-997b-d0037c9e0abf-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768372 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768407 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9wtq\" (UniqueName: \"kubernetes.io/projected/bada3e55-0293-4982-a02e-06d685698bd9-kube-api-access-p9wtq\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768438 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b962c13b-37be-46d3-91ab-5fde6214ff03-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768466 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768490 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-default-certificate\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768516 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-service-ca-bundle\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768525 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b962c13b-37be-46d3-91ab-5fde6214ff03-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768540 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-proxy-tls\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768607 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-trusted-ca\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768635 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768661 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e248d293-d2aa-4bfc-a575-24529404e90c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768689 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz94x\" (UniqueName: \"kubernetes.io/projected/292203b1-555c-4331-90c7-f3a56ee042ba-kube-api-access-pz94x\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768717 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768742 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rhjg\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg\") pod \"image-registry-697d97f7c8-444qp\" (UID: 
\"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768770 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b12a716b-9585-42e4-88c8-aebd93d4f6de-config\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768834 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-serving-cert\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768862 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-config\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768887 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f47c7634-aeac-48cb-aeaa-dc06956728ff-config\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768944 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68bg4\" (UniqueName: \"kubernetes.io/projected/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-kube-api-access-68bg4\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.768973 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64vfb\" (UniqueName: \"kubernetes.io/projected/e3551b47-6cfc-4240-bb74-bf7cc0600a96-kube-api-access-64vfb\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769005 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-service-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769013 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad85cac4-4f5d-4801-a73f-8c7c01806627-metrics-tls\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769065 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-stats-auth\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769094 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcjtg\" (UniqueName: \"kubernetes.io/projected/f47c7634-aeac-48cb-aeaa-dc06956728ff-kube-api-access-zcjtg\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769147 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769175 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9r9c\" (UniqueName: \"kubernetes.io/projected/80763c3d-5c23-4a04-83ed-328b856d84e8-kube-api-access-d9r9c\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769198 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-mountpoint-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769241 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvlpj\" (UniqueName: \"kubernetes.io/projected/7a82f7da-19cc-4d67-8384-be35fad097ed-kube-api-access-bvlpj\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770394 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b12a716b-9585-42e4-88c8-aebd93d4f6de-config\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.769264 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnwzm\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-kube-api-access-rnwzm\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770635 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6t5r\" (UniqueName: \"kubernetes.io/projected/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-kube-api-access-t6t5r\") 
pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770656 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/54711cfd-28d9-4161-90fb-a87a7a6255de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770678 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-key\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770700 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-client\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770720 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0af2c49d-affd-4abb-8910-5d9c717f5b78-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770755 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80763c3d-5c23-4a04-83ed-328b856d84e8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770781 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/292203b1-555c-4331-90c7-f3a56ee042ba-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770805 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e248d293-d2aa-4bfc-a575-24529404e90c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770846 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdk5h\" (UniqueName: \"kubernetes.io/projected/d3e50a3c-8b55-437d-a426-45594219a120-kube-api-access-jdk5h\") 
pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770868 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770887 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7dfv\" (UniqueName: \"kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770909 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770931 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7sgd\" (UniqueName: \"kubernetes.io/projected/79030fb7-fcf1-4404-a300-e8c12becbb9b-kube-api-access-f7sgd\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770951 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2qsg\" (UniqueName: \"kubernetes.io/projected/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-kube-api-access-n2qsg\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.770968 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771002 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzv7s\" (UniqueName: \"kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771026 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc 
kubenswrapper[4675]: I1125 12:30:01.771067 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771088 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-registration-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771107 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-cabundle\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771132 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ltkc\" (UniqueName: \"kubernetes.io/projected/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-kube-api-access-7ltkc\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771156 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjlvt\" (UniqueName: \"kubernetes.io/projected/d9eb7832-4c59-478c-997b-d0037c9e0abf-kube-api-access-fjlvt\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771180 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-metrics-tls\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771199 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f47c7634-aeac-48cb-aeaa-dc06956728ff-serving-cert\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771852 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771904 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.771934 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-metrics-tls\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.772002 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.773702 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bada3e55-0293-4982-a02e-06d685698bd9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.774558 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-trusted-ca\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.775015 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.775473 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-service-ca-bundle\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.775733 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-proxy-tls\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.775933 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-ca\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc 
kubenswrapper[4675]: I1125 12:30:01.776346 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad85cac4-4f5d-4801-a73f-8c7c01806627-metrics-tls\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.776895 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-default-certificate\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.777189 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b12a716b-9585-42e4-88c8-aebd93d4f6de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.777505 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/79030fb7-fcf1-4404-a300-e8c12becbb9b-tmpfs\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.777665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-images\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.778023 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a82f7da-19cc-4d67-8384-be35fad097ed-config\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.778223 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f47c7634-aeac-48cb-aeaa-dc06956728ff-config\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.779390 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.779430 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " 
pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.780145 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-metrics-certs\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.780214 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.780895 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bada3e55-0293-4982-a02e-06d685698bd9-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.781376 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.782283 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad85cac4-4f5d-4801-a73f-8c7c01806627-config-volume\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.783628 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-webhook-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.784121 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.785177 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.785321 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79030fb7-fcf1-4404-a300-e8c12becbb9b-apiservice-cert\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: 
\"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.786509 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.787023 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.287006738 +0000 UTC m=+147.458599079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.788993 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-srv-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.789246 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.790387 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.793255 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9eb7832-4c59-478c-997b-d0037c9e0abf-proxy-tls\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.793482 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f47c7634-aeac-48cb-aeaa-dc06956728ff-serving-cert\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.793603 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/b962c13b-37be-46d3-91ab-5fde6214ff03-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.793869 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e248d293-d2aa-4bfc-a575-24529404e90c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.794291 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.794323 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-stats-auth\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.794520 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.794623 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e248d293-d2aa-4bfc-a575-24529404e90c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.795135 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-etcd-client\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.795302 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a82f7da-19cc-4d67-8384-be35fad097ed-serving-cert\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.795326 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b12a716b-9585-42e4-88c8-aebd93d4f6de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-r9dbs\" (UID: \"b12a716b-9585-42e4-88c8-aebd93d4f6de\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.795738 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-metrics-tls\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.796573 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-metrics-tls\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.798998 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.800756 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/292203b1-555c-4331-90c7-f3a56ee042ba-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.803259 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4a8572cc-2631-4b88-8a65-7b93f16951cb-profile-collector-cert\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.832720 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9wtq\" (UniqueName: \"kubernetes.io/projected/bada3e55-0293-4982-a02e-06d685698bd9-kube-api-access-p9wtq\") pod \"openshift-controller-manager-operator-756b6f6bc6-ksx8f\" (UID: \"bada3e55-0293-4982-a02e-06d685698bd9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.850967 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.852594 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.872712 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.872906 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.372881798 +0000 UTC m=+147.544474139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873388 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-profile-collector-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873437 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-csi-data-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873470 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0af2c49d-affd-4abb-8910-5d9c717f5b78-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873493 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-socket-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873526 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54711cfd-28d9-4161-90fb-a87a7a6255de-config\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873587 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3551b47-6cfc-4240-bb74-bf7cc0600a96-cert\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873633 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-plugins-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873677 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54711cfd-28d9-4161-90fb-a87a7a6255de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873713 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxdvl\" (UniqueName: \"kubernetes.io/projected/0af2c49d-affd-4abb-8910-5d9c717f5b78-kube-api-access-pxdvl\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873839 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64vfb\" (UniqueName: \"kubernetes.io/projected/e3551b47-6cfc-4240-bb74-bf7cc0600a96-kube-api-access-64vfb\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873880 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9r9c\" (UniqueName: \"kubernetes.io/projected/80763c3d-5c23-4a04-83ed-328b856d84e8-kube-api-access-d9r9c\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873905 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-mountpoint-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873941 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6t5r\" (UniqueName: \"kubernetes.io/projected/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-kube-api-access-t6t5r\") pod \"csi-hostpathplugin-7dbvr\" (UID: 
\"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873963 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/54711cfd-28d9-4161-90fb-a87a7a6255de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.873985 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-key\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874009 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0af2c49d-affd-4abb-8910-5d9c717f5b78-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874032 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80763c3d-5c23-4a04-83ed-328b856d84e8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874061 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdk5h\" (UniqueName: \"kubernetes.io/projected/d3e50a3c-8b55-437d-a426-45594219a120-kube-api-access-jdk5h\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874084 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874119 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7dfv\" (UniqueName: \"kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874158 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:01 crc 
kubenswrapper[4675]: I1125 12:30:01.874199 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874224 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-registration-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874246 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-cabundle\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874304 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qn7d\" (UniqueName: \"kubernetes.io/projected/5c4e6d67-533f-47f9-b744-20efbb4c9df6-kube-api-access-4qn7d\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.874330 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-srv-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.876464 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.376449885 +0000 UTC m=+147.548042226 (durationBeforeRetry 500ms). 
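[annotation] Note how each failed CSI operation is re-queued rather than retried inline: nestedpendingoperations.go logs "No retries permitted until <t> (durationBeforeRetry 500ms)", where <t> is roughly 500ms after the failure. A quick Go sketch of that deadline arithmetic, using the timestamps from the entry above; this is illustrative only, not kubelet's actual backoff code:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Failure logged at 12:30:01.876464; the retry deadline is the
        // failure time plus durationBeforeRetry.
        failedAt, _ := time.Parse(time.RFC3339Nano, "2025-11-25T12:30:01.876464Z")
        retryAt := failedAt.Add(500 * time.Millisecond)
        // Prints 12:30:02.376464, matching the logged deadline
        // 12:30:02.376449885 (computed a hair before the log line was emitted).
        fmt.Println(retryAt.Format("15:04:05.000000000"))
    }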
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.877524 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-srv-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.877627 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-cabundle\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.877965 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-registration-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.877962 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878023 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-plugins-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878066 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-socket-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878153 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878164 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-csi-data-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878697 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0af2c49d-affd-4abb-8910-5d9c717f5b78-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878787 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0af2c49d-affd-4abb-8910-5d9c717f5b78-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878795 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54711cfd-28d9-4161-90fb-a87a7a6255de-config\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.878858 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-mountpoint-dir\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.880151 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5c4e6d67-533f-47f9-b744-20efbb4c9df6-signing-key\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.881026 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54711cfd-28d9-4161-90fb-a87a7a6255de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.881407 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3551b47-6cfc-4240-bb74-bf7cc0600a96-cert\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.881457 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d3e50a3c-8b55-437d-a426-45594219a120-profile-collector-cert\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.881991 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/80763c3d-5c23-4a04-83ed-328b856d84e8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.882285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.883993 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.889518 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.889785 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.895976 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.897795 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.908027 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.911919 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgldm\" (UniqueName: \"kubernetes.io/projected/e76ec096-df5f-4aa6-93d4-e2824b4dc454-kube-api-access-sgldm\") pod \"downloads-7954f5f757-f78nf\" (UID: \"e76ec096-df5f-4aa6-93d4-e2824b4dc454\") " pod="openshift-console/downloads-7954f5f757-f78nf"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.926910 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.933200 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e248d293-d2aa-4bfc-a575-24529404e90c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6k9qw\" (UID: \"e248d293-d2aa-4bfc-a575-24529404e90c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw"
Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.937872 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp"
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.954549 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlvcg\" (UniqueName: \"kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg\") pod \"marketplace-operator-79b997595-9hj2h\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.975551 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:01 crc kubenswrapper[4675]: E1125 12:30:01.976227 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.476204226 +0000 UTC m=+147.647796567 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.978235 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkt74\" (UniqueName: \"kubernetes.io/projected/ad85cac4-4f5d-4801-a73f-8c7c01806627-kube-api-access-fkt74\") pod \"dns-default-86gc4\" (UID: \"ad85cac4-4f5d-4801-a73f-8c7c01806627\") " pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:01 crc kubenswrapper[4675]: I1125 12:30:01.993045 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvlpj\" (UniqueName: \"kubernetes.io/projected/7a82f7da-19cc-4d67-8384-be35fad097ed-kube-api-access-bvlpj\") pod \"etcd-operator-b45778765-mqfhd\" (UID: \"7a82f7da-19cc-4d67-8384-be35fad097ed\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.010637 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.012059 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnwzm\" (UniqueName: \"kubernetes.io/projected/b962c13b-37be-46d3-91ab-5fde6214ff03-kube-api-access-rnwzm\") pod \"cluster-image-registry-operator-dc59b4c8b-b7fmr\" (UID: \"b962c13b-37be-46d3-91ab-5fde6214ff03\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.016716 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.032254 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rhjg\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.051083 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68bg4\" (UniqueName: \"kubernetes.io/projected/ee9554d7-9362-440a-a8b6-f6fefcd2fe49-kube-api-access-68bg4\") pod \"router-default-5444994796-ztlr8\" (UID: \"ee9554d7-9362-440a-a8b6-f6fefcd2fe49\") " pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.071057 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvdbm\" (UniqueName: \"kubernetes.io/projected/f94d5d6d-5e22-4aef-9fa9-b8c178d78454-kube-api-access-qvdbm\") pod \"ingress-operator-5b745b69d9-v5q5t\" (UID: \"f94d5d6d-5e22-4aef-9fa9-b8c178d78454\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.076795 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.077118 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.577106076 +0000 UTC m=+147.748698417 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.094940 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.096714 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2qsg\" (UniqueName: \"kubernetes.io/projected/b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8-kube-api-access-n2qsg\") pod \"dns-operator-744455d44c-5sbb6\" (UID: \"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8\") " pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.117497 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7sgd\" (UniqueName: \"kubernetes.io/projected/79030fb7-fcf1-4404-a300-e8c12becbb9b-kube-api-access-f7sgd\") pod \"packageserver-d55dfcdfc-kcdzn\" (UID: \"79030fb7-fcf1-4404-a300-e8c12becbb9b\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.143339 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz94x\" (UniqueName: \"kubernetes.io/projected/292203b1-555c-4331-90c7-f3a56ee042ba-kube-api-access-pz94x\") pod \"control-plane-machine-set-operator-78cbb6b69f-9ss4k\" (UID: \"292203b1-555c-4331-90c7-f3a56ee042ba\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.160140 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.161477 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.169763 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.170999 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ltkc\" (UniqueName: \"kubernetes.io/projected/e3bafb24-e664-4215-8093-8d5bbe5cf4cd-kube-api-access-7ltkc\") pod \"machine-config-operator-74547568cd-f9zd9\" (UID: \"e3bafb24-e664-4215-8093-8d5bbe5cf4cd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.176950 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjlvt\" (UniqueName: \"kubernetes.io/projected/d9eb7832-4c59-478c-997b-d0037c9e0abf-kube-api-access-fjlvt\") pod \"machine-config-controller-84d6567774-682hw\" (UID: \"d9eb7832-4c59-478c-997b-d0037c9e0abf\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.177427 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.177573 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.677550041 +0000 UTC m=+147.849142372 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.177725 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glvxs\" (UniqueName: \"kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.177865 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.177893 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwxw5\" (UniqueName: \"kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.178308 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.678293405 +0000 UTC m=+147.849885746 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.191513 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glvxs\" (UniqueName: \"kubernetes.io/projected/2351e3a8-bdfe-4a50-b234-fe4a84b82169-kube-api-access-glvxs\") pod \"machine-approver-56656f9798-zkcp6\" (UID: \"2351e3a8-bdfe-4a50-b234-fe4a84b82169\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.195701 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwxw5\" (UniqueName: \"kubernetes.io/projected/4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa-kube-api-access-nwxw5\") pod \"apiserver-7bbb656c7d-9n5x8\" (UID: \"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.196148 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzv7s\" (UniqueName: \"kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s\") pod \"console-f9d7485db-r2pgw\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") " pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.207099 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.212992 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.217724 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2hsf\" (UniqueName: \"kubernetes.io/projected/4a8572cc-2631-4b88-8a65-7b93f16951cb-kube-api-access-q2hsf\") pod \"catalog-operator-68c6474976-4jwnj\" (UID: \"4a8572cc-2631-4b88-8a65-7b93f16951cb\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.228432 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.245477 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.256118 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcjtg\" (UniqueName: \"kubernetes.io/projected/f47c7634-aeac-48cb-aeaa-dc06956728ff-kube-api-access-zcjtg\") pod \"service-ca-operator-777779d784-cm2s5\" (UID: \"f47c7634-aeac-48cb-aeaa-dc06956728ff\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.256510 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.260073 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.267074 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.273886 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6t5r\" (UniqueName: \"kubernetes.io/projected/c4a9d2d1-a49d-4103-9791-e5e8e7a04392-kube-api-access-t6t5r\") pod \"csi-hostpathplugin-7dbvr\" (UID: \"c4a9d2d1-a49d-4103-9791-e5e8e7a04392\") " pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.275401 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.279721 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.280739 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.780724535 +0000 UTC m=+147.952316876 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.319354 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/54711cfd-28d9-4161-90fb-a87a7a6255de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4mv8m\" (UID: \"54711cfd-28d9-4161-90fb-a87a7a6255de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.335035 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.337144 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7dfv\" (UniqueName: \"kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv\") pod \"collect-profiles-29401230-8s8kg\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.339639 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qn7d\" (UniqueName: \"kubernetes.io/projected/5c4e6d67-533f-47f9-b744-20efbb4c9df6-kube-api-access-4qn7d\") pod \"service-ca-9c57cc56f-bwvrd\" (UID: \"5c4e6d67-533f-47f9-b744-20efbb4c9df6\") " pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.356007 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdk5h\" (UniqueName: \"kubernetes.io/projected/d3e50a3c-8b55-437d-a426-45594219a120-kube-api-access-jdk5h\") pod \"olm-operator-6b444d44fb-d4t62\" (UID: \"d3e50a3c-8b55-437d-a426-45594219a120\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.368754 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.372945 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9r9c\" (UniqueName: \"kubernetes.io/projected/80763c3d-5c23-4a04-83ed-328b856d84e8-kube-api-access-d9r9c\") pod \"multus-admission-controller-857f4d67dd-vmnz8\" (UID: \"80763c3d-5c23-4a04-83ed-328b856d84e8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.394483 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.394931 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.894916556 +0000 UTC m=+148.066508897 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.396670 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.420925 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.421837 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxdvl\" (UniqueName: \"kubernetes.io/projected/0af2c49d-affd-4abb-8910-5d9c717f5b78-kube-api-access-pxdvl\") pod \"kube-storage-version-migrator-operator-b67b599dd-nftsb\" (UID: \"0af2c49d-affd-4abb-8910-5d9c717f5b78\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.432397 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64vfb\" (UniqueName: \"kubernetes.io/projected/e3551b47-6cfc-4240-bb74-bf7cc0600a96-kube-api-access-64vfb\") pod \"ingress-canary-6579r\" (UID: \"e3551b47-6cfc-4240-bb74-bf7cc0600a96\") " pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.438995 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf"] Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.464049 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.476160 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.495421 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.496000 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:02.995980831 +0000 UTC m=+148.167573172 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.520916 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.554328 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.590160 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.596741 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.597159 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.097148159 +0000 UTC m=+148.268740490 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.597441 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.608294 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.614224 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.620877 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.642352 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6579r" Nov 25 12:30:02 crc kubenswrapper[4675]: W1125 12:30:02.682515 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee9554d7_9362_440a_a8b6_f6fefcd2fe49.slice/crio-280dac94c41ecf7610285ae6cf98ef621f55c55d1302d7cd6aa382fc94f2f246 WatchSource:0}: Error finding container 280dac94c41ecf7610285ae6cf98ef621f55c55d1302d7cd6aa382fc94f2f246: Status 404 returned error can't find the container with id 280dac94c41ecf7610285ae6cf98ef621f55c55d1302d7cd6aa382fc94f2f246 Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.698180 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.698283 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.198260425 +0000 UTC m=+148.369852766 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.698499 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.698872 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.198855914 +0000 UTC m=+148.370448255 (durationBeforeRetry 500ms). 
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.699230 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" podStartSLOduration=126.699209596 podStartE2EDuration="2m6.699209596s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:02.698021288 +0000 UTC m=+147.869613629" watchObservedRunningTime="2025-11-25 12:30:02.699209596 +0000 UTC m=+147.870801937"
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.722581 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fmvk5"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.761639 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.779939 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qsjxt" podStartSLOduration=126.779917109 podStartE2EDuration="2m6.779917109s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:02.778304987 +0000 UTC m=+147.949897338" watchObservedRunningTime="2025-11-25 12:30:02.779917109 +0000 UTC m=+147.951509470"
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.789348 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.792236 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wh2qp"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.801192 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.801671 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.301655796 +0000 UTC m=+148.473248137 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
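[annotation] The pod_startup_latency_tracker lines above are internally consistent: podStartSLOduration is the watch-observed running time minus podCreationTimestamp (for route-controller-manager: 12:30:02.699209596 − 12:27:56 = 126.699209596 s = 2m6.699s). A quick Go check of that arithmetic; the field semantics are inferred from the log itself, so treat this as a sketch rather than kubelet documentation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        created, _ := time.Parse(time.RFC3339, "2025-11-25T12:27:56Z")
        observed, _ := time.Parse(time.RFC3339Nano, "2025-11-25T12:30:02.699209596Z")
        // Prints 2m6.699209596s, matching podStartSLOduration=126.699209596.
        fmt.Println(observed.Sub(created))
    }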
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.805848 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.834769 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.848424 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.862044 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-j2gbr"]
Nov 25 12:30:02 crc kubenswrapper[4675]: I1125 12:30:02.903251 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:02 crc kubenswrapper[4675]: E1125 12:30:02.903549 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.403538217 +0000 UTC m=+148.575130558 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.006184 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.007326 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.506617647 +0000 UTC m=+148.678209988 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.054919 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-f78nf"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.057387 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.060739 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.069797 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.107569 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.107969 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.6079549 +0000 UTC m=+148.779547241 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.187718 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.209378 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.209512 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.709491341 +0000 UTC m=+148.881083692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.209989 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.709981337 +0000 UTC m=+148.881573678 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.209549 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.242849 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mqfhd"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.251488 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw"]
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.270460 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" event={"ID":"6737ff6f-0804-452d-80b7-b8fc21a6419e","Type":"ContainerStarted","Data":"61fc76a475c0c033b79b73864d3e5d3dc6426259ebb781a715dd7bc0868850b1"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.281984 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" event={"ID":"2351e3a8-bdfe-4a50-b234-fe4a84b82169","Type":"ContainerStarted","Data":"21dda37cc238c964eae83f0a13bfd483efd078553c45a4a15690033e3a8f757d"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.320436 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.320873 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.82085853 +0000 UTC m=+148.992450871 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.332706 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5582k" podStartSLOduration=128.332690805 podStartE2EDuration="2m8.332690805s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:03.304712316 +0000 UTC m=+148.476304657" watchObservedRunningTime="2025-11-25 12:30:03.332690805 +0000 UTC m=+148.504283146"
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.346776 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-f78nf" event={"ID":"e76ec096-df5f-4aa6-93d4-e2824b4dc454","Type":"ContainerStarted","Data":"e51c1834224d2c4547711a32572fb2b54029c2f1a9d69aac6fd950cbe94564f1"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.351176 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" podStartSLOduration=127.351153305 podStartE2EDuration="2m7.351153305s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:03.33438819 +0000 UTC m=+148.505980521" watchObservedRunningTime="2025-11-25 12:30:03.351153305 +0000 UTC m=+148.522745646"
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.408749 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" event={"ID":"412d82e0-9b92-45f4-8030-8f91fffe3e9a","Type":"ContainerStarted","Data":"3d0838cc3e9afd068798d4733b64ac2c557595cfb7c320a980a4c481d0c7a836"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.411650 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" event={"ID":"62f92a86-663f-4101-9429-ffcd900bef67","Type":"ContainerStarted","Data":"d55613688105643edf029ccf58d4bdafb5477ab6ddcc77079eed74482de5d160"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.421134 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" event={"ID":"bada3e55-0293-4982-a02e-06d685698bd9","Type":"ContainerStarted","Data":"6516f49e7ab0e9f823519950f2179d4be21bb13a251398edf85ff6c4725f0b90"}
Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.422053 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.422382 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:03.92236914 +0000 UTC m=+149.093961481 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.430475 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.439135 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401215-djzft"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.468838 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" event={"ID":"25770358-1610-4bfe-bff7-4acf81e687e8","Type":"ContainerStarted","Data":"9a85f1654923fc9db13044aeba469785b5c0c82bc64a676d3613ebeb2431f230"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.480552 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" event={"ID":"962d971d-f0de-4d22-a854-e4a65644b9b8","Type":"ContainerStarted","Data":"df6710fa7865c07c72d547435d456bc33cbb33465b452ac1ec32264248aebe46"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.497833 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" event={"ID":"db01a316-423a-4238-8a5b-9839aaac33ff","Type":"ContainerStarted","Data":"68ff8422e9d83a05634e0c00185eb4d57db30891c5d8d3a113055b92f0444c71"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.503109 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.510305 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7dbvr"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.512036 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" event={"ID":"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0","Type":"ContainerStarted","Data":"676ac6a05445358c2e71be9803e93244b4a9f465f47cc313676243a6c48c44d5"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.522083 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-682hw"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.522747 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" 
(UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.524442 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.024422006 +0000 UTC m=+149.196014357 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.531551 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ztlr8" event={"ID":"ee9554d7-9362-440a-a8b6-f6fefcd2fe49","Type":"ContainerStarted","Data":"280dac94c41ecf7610285ae6cf98ef621f55c55d1302d7cd6aa382fc94f2f246"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.537174 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.537276 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.537510 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.537569 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.537681 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.538682 4675 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.038662539 +0000 UTC m=+149.210254950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.542181 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.554872 4675 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zpfvq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.554911 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.555732 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.557345 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.559242 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577492 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba36f8b8-3b33-4302-9f4e-8f1b34a901ff" path="/var/lib/kubelet/pods/ba36f8b8-3b33-4302-9f4e-8f1b34a901ff/volumes" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577757 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577783 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" event={"ID":"292203b1-555c-4331-90c7-f3a56ee042ba","Type":"ContainerStarted","Data":"9a347eeef05629493eea5968b87facf496faf9d8de57c5fe88a0621031887174"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577796 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" event={"ID":"9dfcfcb3-6d81-425b-98ae-925d3fbf2369","Type":"ContainerStarted","Data":"df75006f0b6d467b77b674b8ec32cabc98d3e70f0d4cceb03fa5888119886de4"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577805 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" event={"ID":"f94d5d6d-5e22-4aef-9fa9-b8c178d78454","Type":"ContainerStarted","Data":"969fe990ad7de10be5d48c0b359688876cd92819739b43a221aa52d8c43f1e46"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.577886 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" event={"ID":"b12a716b-9585-42e4-88c8-aebd93d4f6de","Type":"ContainerStarted","Data":"7732a118a552001f4d91e57bb9cb7ff281211c705e2a4bd208e06a50eb0bbe95"} Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.590911 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.640349 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.640648 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.140628294 +0000 UTC m=+149.312220635 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.642085 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.647888 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.147872609 +0000 UTC m=+149.319464950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.662537 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.665789 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.721134 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-86gc4"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.743155 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.743955 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.243933011 +0000 UTC m=+149.415525372 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: W1125 12:30:03.749925 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4a9d2d1_a49d_4103_9791_e5e8e7a04392.slice/crio-0980094039fe7e817d8951375d4f424ea4d22a95ecbc30908bbc219344db6795 WatchSource:0}: Error finding container 0980094039fe7e817d8951375d4f424ea4d22a95ecbc30908bbc219344db6795: Status 404 returned error can't find the container with id 0980094039fe7e817d8951375d4f424ea4d22a95ecbc30908bbc219344db6795 Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.818920 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-fmkdh" podStartSLOduration=6.818886567 podStartE2EDuration="6.818886567s" podCreationTimestamp="2025-11-25 12:29:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:03.816407166 +0000 UTC m=+148.987999527" watchObservedRunningTime="2025-11-25 12:30:03.818886567 +0000 UTC m=+148.990478928" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.847755 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.848126 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.348110277 +0000 UTC m=+149.519702618 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.851574 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.853383 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.869551 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-9ftpx" podStartSLOduration=128.869535723 podStartE2EDuration="2m8.869535723s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:03.866489314 +0000 UTC m=+149.038081655" watchObservedRunningTime="2025-11-25 12:30:03.869535723 +0000 UTC m=+149.041128064" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.869971 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5sbb6"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.870267 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.921494 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.941136 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn"] Nov 25 12:30:03 crc kubenswrapper[4675]: I1125 12:30:03.949928 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:03 crc kubenswrapper[4675]: E1125 12:30:03.950220 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.450204925 +0000 UTC m=+149.621797266 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.013753 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.018788 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.035470 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.041885 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6579r"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.067479 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bwvrd"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.068995 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.070051 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.071200 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-ztlr8" podStartSLOduration=128.071185757 podStartE2EDuration="2m8.071185757s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.062934299 +0000 UTC m=+149.234526640" watchObservedRunningTime="2025-11-25 12:30:04.071185757 +0000 UTC m=+149.242778108" Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.072397 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.572379946 +0000 UTC m=+149.743972287 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: W1125 12:30:04.129678 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4da7f1b1_e92f_41ae_8f2e_9d265b32d0aa.slice/crio-05b96efd3fa587d80e18ef8af2143124538b2157c61d5ca8e6e11af013566329 WatchSource:0}: Error finding container 05b96efd3fa587d80e18ef8af2143124538b2157c61d5ca8e6e11af013566329: Status 404 returned error can't find the container with id 05b96efd3fa587d80e18ef8af2143124538b2157c61d5ca8e6e11af013566329 Nov 25 12:30:04 crc kubenswrapper[4675]: W1125 12:30:04.148367 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3e50a3c_8b55_437d_a426_45594219a120.slice/crio-cbaad021d2d1a1830d358a9801cb3d121d2cf8677a81a5ad0809d062aaae635c WatchSource:0}: Error finding container cbaad021d2d1a1830d358a9801cb3d121d2cf8677a81a5ad0809d062aaae635c: Status 404 returned error can't find the container with id cbaad021d2d1a1830d358a9801cb3d121d2cf8677a81a5ad0809d062aaae635c Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.171562 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.173554 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.672713977 +0000 UTC m=+149.844306318 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.177097 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" podStartSLOduration=129.177074739 podStartE2EDuration="2m9.177074739s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.089295276 +0000 UTC m=+149.260887617" watchObservedRunningTime="2025-11-25 12:30:04.177074739 +0000 UTC m=+149.348667080" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.179455 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vmnz8"] Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.257429 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.258837 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.258878 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.273926 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.274201 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.774189846 +0000 UTC m=+149.945782187 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.374785 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.375337 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.875323022 +0000 UTC m=+150.046915363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.477263 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.478119 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:04.978100743 +0000 UTC m=+150.149693084 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.578787 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.579701 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.079661483 +0000 UTC m=+150.251253824 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.591359 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-r2pgw" event={"ID":"74086034-016c-4df6-bd1e-c4f99eb3edbe","Type":"ContainerStarted","Data":"ac77d76b2000499f6e8052b6fe3ca5fc67534bd2221c9418df2c1b238eae4886"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.600928 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" event={"ID":"79030fb7-fcf1-4404-a300-e8c12becbb9b","Type":"ContainerStarted","Data":"cc598bdb6bec6b84f6053598ab78f97921b31660f5bcc58bcb03372aa92b22f8"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.605366 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" event={"ID":"412d82e0-9b92-45f4-8030-8f91fffe3e9a","Type":"ContainerStarted","Data":"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.606934 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.608174 4675 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9hj2h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.608254 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator" probeResult="failure" output="Get 
\"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.612064 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" event={"ID":"f47c7634-aeac-48cb-aeaa-dc06956728ff","Type":"ContainerStarted","Data":"cc0fa00b83265c7be8de578e45b210d097ae5495726f8ed5ed97ae028721e6d3"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.630294 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" event={"ID":"9dfcfcb3-6d81-425b-98ae-925d3fbf2369","Type":"ContainerStarted","Data":"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.631446 4675 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zpfvq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.631481 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.679574 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ztlr8" event={"ID":"ee9554d7-9362-440a-a8b6-f6fefcd2fe49","Type":"ContainerStarted","Data":"9d9886cdf1b62cdd702cb657aa28166c46b6ea9afaad1f5a9f741a6e298f6db0"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.681185 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.681740 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.181722391 +0000 UTC m=+150.353314732 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.725465 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fe6bfb28193ca3e8c24cf38c6aee8bb4357c3e1d5a1847f7a5e35e2ed0c125c1"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.730557 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" event={"ID":"0af2c49d-affd-4abb-8910-5d9c717f5b78","Type":"ContainerStarted","Data":"c782f19d58b47019fa1bb244b992669a6b266b20623ffd2b10d662910d776dbb"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.740547 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" event={"ID":"e248d293-d2aa-4bfc-a575-24529404e90c","Type":"ContainerStarted","Data":"07f6a1bb61d7e35477e89e4f732b03c698b9b977e9d226d70e7aee208f28d663"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.745354 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" event={"ID":"b962c13b-37be-46d3-91ab-5fde6214ff03","Type":"ContainerStarted","Data":"d5df995941bddbc25f18fc2d8265a706b6a34b16864c71ad064228936eba8c2f"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.749426 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" event={"ID":"c4a9d2d1-a49d-4103-9791-e5e8e7a04392","Type":"ContainerStarted","Data":"0980094039fe7e817d8951375d4f424ea4d22a95ecbc30908bbc219344db6795"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.756656 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" podStartSLOduration=128.756640816 podStartE2EDuration="2m8.756640816s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.62816648 +0000 UTC m=+149.799758821" watchObservedRunningTime="2025-11-25 12:30:04.756640816 +0000 UTC m=+149.928233157" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.768402 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" podStartSLOduration=129.768381407 podStartE2EDuration="2m9.768381407s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.768344326 +0000 UTC m=+149.939936667" watchObservedRunningTime="2025-11-25 12:30:04.768381407 +0000 UTC m=+149.939973768" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.782035 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.782785 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.282756765 +0000 UTC m=+150.454349116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.793344 4675 generic.go:334] "Generic (PLEG): container finished" podID="62f92a86-663f-4101-9429-ffcd900bef67" containerID="a1e9bccb4246d38a280403f1986dea7108765477a66289aea556314e99a04e62" exitCode=0 Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.794222 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" event={"ID":"62f92a86-663f-4101-9429-ffcd900bef67","Type":"ContainerDied","Data":"a1e9bccb4246d38a280403f1986dea7108765477a66289aea556314e99a04e62"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.829189 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" event={"ID":"54711cfd-28d9-4161-90fb-a87a7a6255de","Type":"ContainerStarted","Data":"4f23fa892b868e335f8836ef8801d22b14b655a2a416b632f7420b071c8d59b5"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.838168 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" event={"ID":"d9eb7832-4c59-478c-997b-d0037c9e0abf","Type":"ContainerStarted","Data":"89bda56f058fd7d522b66b296a030c3b0bd4cb72c99f5f22e6a58ebe05dc7a53"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.844371 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" event={"ID":"7a82f7da-19cc-4d67-8384-be35fad097ed","Type":"ContainerStarted","Data":"6dbd942204733849fae42695dc8683dff4b396471c9671be2ba94f029d185ae9"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.861194 4675 generic.go:334] "Generic (PLEG): container finished" podID="25770358-1610-4bfe-bff7-4acf81e687e8" containerID="c79eb88a52cdb710fb702c18ec74f39de3a79656de1683dbf4d6e6674821b746" exitCode=0 Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.861896 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" event={"ID":"25770358-1610-4bfe-bff7-4acf81e687e8","Type":"ContainerDied","Data":"c79eb88a52cdb710fb702c18ec74f39de3a79656de1683dbf4d6e6674821b746"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.878066 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" 
event={"ID":"6737ff6f-0804-452d-80b7-b8fc21a6419e","Type":"ContainerStarted","Data":"5ac16b7d59331e945319c556b1367b79bea5a91538893ded0b842d57414d91f7"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.880715 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" event={"ID":"962d971d-f0de-4d22-a854-e4a65644b9b8","Type":"ContainerStarted","Data":"55c241d2fb0580820b424b15a1f2e56844d511405d49cce8b4a9536ba217c3e5"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.885727 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.892108 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.392066137 +0000 UTC m=+150.563658478 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.916729 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-fmvk5" podStartSLOduration=129.916706848 podStartE2EDuration="2m9.916706848s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.914567809 +0000 UTC m=+150.086160180" watchObservedRunningTime="2025-11-25 12:30:04.916706848 +0000 UTC m=+150.088299199" Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.918721 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" event={"ID":"a4c78f1a-1f5d-49e2-9dd2-b3dffccc12b0","Type":"ContainerStarted","Data":"e2ac3d25e593bc0a4682e30e8bc22112600f66539246d6cf9440bf84cd30a77d"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.969292 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" event={"ID":"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8","Type":"ContainerStarted","Data":"4e63fe788ec0ff1d5c94dd459009f40728a6e6c732af85a2e630ebb2f0772f18"} Nov 25 12:30:04 crc kubenswrapper[4675]: I1125 12:30:04.988575 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:04 crc kubenswrapper[4675]: E1125 12:30:04.989686 4675 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.48966955 +0000 UTC m=+150.661261891 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.015896 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-njjzf" podStartSLOduration=130.015873312 podStartE2EDuration="2m10.015873312s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:04.998363552 +0000 UTC m=+150.169955893" watchObservedRunningTime="2025-11-25 12:30:05.015873312 +0000 UTC m=+150.187465653" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.045354 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" event={"ID":"e3bafb24-e664-4215-8093-8d5bbe5cf4cd","Type":"ContainerStarted","Data":"59d7b087a9c0986a85b2d11f5befd9e7b3a86bafb725b9a483fd078dc4fe0b2b"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.056343 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6579r" event={"ID":"e3551b47-6cfc-4240-bb74-bf7cc0600a96","Type":"ContainerStarted","Data":"e41f72b4690d90b2eaa06bf8cdb2fc2cd827f00a47687a40e97c75717d227749"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.056943 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" event={"ID":"b12a716b-9585-42e4-88c8-aebd93d4f6de","Type":"ContainerStarted","Data":"95080bc51aea917088672cc5bb1e60a4fb8d4e883c858655b792df6ba075495b"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.060596 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" event={"ID":"2c198829-c4dc-459f-b29a-e705390ef9eb","Type":"ContainerStarted","Data":"7dca8cc42bc0efa037ccc00d234992b51ea334af8503f213c3796b8ef28b022d"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.096864 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.097279 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.597269137 +0000 UTC m=+150.768861478 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.108181 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-f78nf" event={"ID":"e76ec096-df5f-4aa6-93d4-e2824b4dc454","Type":"ContainerStarted","Data":"7033a57e88fb3742fddae56a9a764ecca7deabda5b92028c1a6e326a15fc1d90"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.108985 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.110783 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-86gc4" event={"ID":"ad85cac4-4f5d-4801-a73f-8c7c01806627","Type":"ContainerStarted","Data":"691e8f4aa3685aa5605b2af9f20205e8a18e746ba2f943dffaa353d178b46086"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.113729 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" event={"ID":"292203b1-555c-4331-90c7-f3a56ee042ba","Type":"ContainerStarted","Data":"07217e9910e47efed60e916176baf651b176e932507656c288454df3aa7e3d07"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.123216 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" event={"ID":"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa","Type":"ContainerStarted","Data":"05b96efd3fa587d80e18ef8af2143124538b2157c61d5ca8e6e11af013566329"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.127562 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.127626 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.135420 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-r9dbs" podStartSLOduration=129.135396306 podStartE2EDuration="2m9.135396306s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.079434647 +0000 UTC m=+150.251026988" watchObservedRunningTime="2025-11-25 12:30:05.135396306 +0000 UTC m=+150.306988637" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.152736 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" 
event={"ID":"bada3e55-0293-4982-a02e-06d685698bd9","Type":"ContainerStarted","Data":"462a6e1d71209fb49eb94ecc3fb33d077fd759156afcdc07bad81ac584a823f7"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.156932 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" event={"ID":"5c4e6d67-533f-47f9-b744-20efbb4c9df6","Type":"ContainerStarted","Data":"a615f855532e5242b248712fcd4aa42e220e7470ae0178df3ead071a173a50f2"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.161689 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-f78nf" podStartSLOduration=130.16167358 podStartE2EDuration="2m10.16167358s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.1333694 +0000 UTC m=+150.304961741" watchObservedRunningTime="2025-11-25 12:30:05.16167358 +0000 UTC m=+150.333265941" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.162919 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9ss4k" podStartSLOduration=129.16291121 podStartE2EDuration="2m9.16291121s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.161239176 +0000 UTC m=+150.332831517" watchObservedRunningTime="2025-11-25 12:30:05.16291121 +0000 UTC m=+150.334503581" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.173606 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" event={"ID":"db01a316-423a-4238-8a5b-9839aaac33ff","Type":"ContainerStarted","Data":"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.174612 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.176589 4675 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qb49w container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.184978 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.187674 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ksx8f" podStartSLOduration=130.187659284 podStartE2EDuration="2m10.187659284s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.18658024 +0000 UTC m=+150.358172581" watchObservedRunningTime="2025-11-25 12:30:05.187659284 +0000 UTC m=+150.359251625" Nov 25 
12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.210961 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.212618 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.712300336 +0000 UTC m=+150.883892677 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.228025 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" event={"ID":"4a8572cc-2631-4b88-8a65-7b93f16951cb","Type":"ContainerStarted","Data":"dd7cbf3fc3d0f32f6a177fddc4145de65a0b96357fdb82c39c119f8bf9397c86"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.234397 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.264637 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:05 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:05 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:05 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.264689 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.266611 4675 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-4jwnj container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.266639 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" podUID="4a8572cc-2631-4b88-8a65-7b93f16951cb" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.275103 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" podStartSLOduration=130.275090056 podStartE2EDuration="2m10.275090056s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.25151162 +0000 UTC m=+150.423103971" watchObservedRunningTime="2025-11-25 12:30:05.275090056 +0000 UTC m=+150.446682397" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.287138 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" event={"ID":"f94d5d6d-5e22-4aef-9fa9-b8c178d78454","Type":"ContainerStarted","Data":"3bfaec9237495af48ff2db09ef8c0d99ce6bcf4ce58bd71246002fc986f2aba3"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.301470 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" event={"ID":"80763c3d-5c23-4a04-83ed-328b856d84e8","Type":"ContainerStarted","Data":"9d26ab6e1d9ceffe05ec262ae30d62823921f5a6b713d0d80625c6011b2e7848"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.312447 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.314024 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.814009521 +0000 UTC m=+150.985601912 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.322204 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" event={"ID":"d3e50a3c-8b55-437d-a426-45594219a120","Type":"ContainerStarted","Data":"cbaad021d2d1a1830d358a9801cb3d121d2cf8677a81a5ad0809d062aaae635c"} Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.414272 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.415137 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.915117777 +0000 UTC m=+151.086710118 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.415572 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.416690 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:05.916680019 +0000 UTC m=+151.088272360 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.516529 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.516887 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.016872575 +0000 UTC m=+151.188464916 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.588040 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" podStartSLOduration=129.588026157 podStartE2EDuration="2m9.588026157s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:05.276280435 +0000 UTC m=+150.447872776" watchObservedRunningTime="2025-11-25 12:30:05.588026157 +0000 UTC m=+150.759618498" Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.622923 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.623199 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.12318826 +0000 UTC m=+151.294780601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.724384 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.724580 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.224550515 +0000 UTC m=+151.396142866 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.724922 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.725244 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.225234546 +0000 UTC m=+151.396826887 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.827467 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.827908 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.327890054 +0000 UTC m=+151.499482395 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:05 crc kubenswrapper[4675]: I1125 12:30:05.932673 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:05 crc kubenswrapper[4675]: E1125 12:30:05.933026 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.43301076 +0000 UTC m=+151.604603101 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.034445 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.034577 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.53454522 +0000 UTC m=+151.706137551 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.034765 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.035230 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.535211312 +0000 UTC m=+151.706803653 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.136216 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.136577 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.636560955 +0000 UTC m=+151.808153296 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.136976 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.137307 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.637297179 +0000 UTC m=+151.808889520 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.238312 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.238685 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.738669664 +0000 UTC m=+151.910262005 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.270249 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:06 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:06 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:06 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.270318 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.342476 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.343118 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.843106719 +0000 UTC m=+152.014699060 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.392748 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" event={"ID":"80763c3d-5c23-4a04-83ed-328b856d84e8","Type":"ContainerStarted","Data":"ac3b35b04176fec8bf2eccf89e230e20fc18539f04df2e0c94132588719397e2"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.443955 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" event={"ID":"25770358-1610-4bfe-bff7-4acf81e687e8","Type":"ContainerStarted","Data":"97453e99b454475bfa0137d3286b30a5a4928e7059fa0e617bd3adb219230c5e"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.444948 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.447936 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.448063 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.948031949 +0000 UTC m=+152.119624290 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.448246 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.448615 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:06.948604388 +0000 UTC m=+152.120196729 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.470997 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" event={"ID":"4a8572cc-2631-4b88-8a65-7b93f16951cb","Type":"ContainerStarted","Data":"935d3a12e3bb741eedff75aff197cc7d673badedef9610c00e8ed4df71d5da86"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.480824 4675 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-4jwnj container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.480871 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" podUID="4a8572cc-2631-4b88-8a65-7b93f16951cb" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.493302 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" event={"ID":"79030fb7-fcf1-4404-a300-e8c12becbb9b","Type":"ContainerStarted","Data":"cf40183dfc3325720546e6977cf2aa56f773eb3d00a32d74eb520e89920477c0"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.494348 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.521272 4675 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-kcdzn container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body= Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.521331 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" podUID="79030fb7-fcf1-4404-a300-e8c12becbb9b" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.522477 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" podStartSLOduration=130.522453188 podStartE2EDuration="2m10.522453188s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.521976443 +0000 UTC m=+151.693568794" watchObservedRunningTime="2025-11-25 12:30:06.522453188 +0000 UTC m=+151.694045529" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.522899 4675 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" podStartSLOduration=131.522894702 podStartE2EDuration="2m11.522894702s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.464441792 +0000 UTC m=+151.636034153" watchObservedRunningTime="2025-11-25 12:30:06.522894702 +0000 UTC m=+151.694487053" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.538666 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"598bf62dad603746ed83f201277698f0af30f281cb25065656590d9b938f5e1b"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.538715 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"2f5ce27412198d564d05e432f29a1f42354a3b1bc9a25b13e913ab63566a18f6"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.556750 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.558120 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.058105497 +0000 UTC m=+152.229697838 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.574756 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" event={"ID":"2c198829-c4dc-459f-b29a-e705390ef9eb","Type":"ContainerStarted","Data":"0ef735e5c248dfbecefa8320d20ae1aaa94227042eecb2ed966a55f462e1b135"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.577910 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" event={"ID":"d3e50a3c-8b55-437d-a426-45594219a120","Type":"ContainerStarted","Data":"16f296eed767bb3e1ec1e0bde3869583da3513701d0b8aae6725d84d9ce3e98f"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.578798 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.591710 4675 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-d4t62 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.591770 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" podUID="d3e50a3c-8b55-437d-a426-45594219a120" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.597583 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" event={"ID":"5c4e6d67-533f-47f9-b744-20efbb4c9df6","Type":"ContainerStarted","Data":"8feef6436e8a0db7df0f06ebcf640aff3eb6b454a2c81f47c5f9fd2a9314343d"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.604930 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" podStartSLOduration=6.604913298 podStartE2EDuration="6.604913298s" podCreationTimestamp="2025-11-25 12:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.603540904 +0000 UTC m=+151.775133255" watchObservedRunningTime="2025-11-25 12:30:06.604913298 +0000 UTC m=+151.776505649" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.621190 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" event={"ID":"e3bafb24-e664-4215-8093-8d5bbe5cf4cd","Type":"ContainerStarted","Data":"1b7f1b2c38aa3290f7bd01c6d9ec1084cc2070f583a139f1fdf79a20f0f53f65"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.621257 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" event={"ID":"e3bafb24-e664-4215-8093-8d5bbe5cf4cd","Type":"ContainerStarted","Data":"2dee76bc03041d33cf1dc487c6016ad411d3df3122ba7161084ac39f5ad69daa"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.643371 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" event={"ID":"54711cfd-28d9-4161-90fb-a87a7a6255de","Type":"ContainerStarted","Data":"eb5a8a7e21c66198606725010f634f4654edd43fcdcb0ea49693fbce92f51e0b"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.655052 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" event={"ID":"d9eb7832-4c59-478c-997b-d0037c9e0abf","Type":"ContainerStarted","Data":"004d21a9725cf1e9e5904676ed74a311c588447c5223d1ebfff803b2bb0e79da"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.655103 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" event={"ID":"d9eb7832-4c59-478c-997b-d0037c9e0abf","Type":"ContainerStarted","Data":"4a9a85c5e4c16dbeeece7a42644dba60b23296976d7b5cf40b7ce9bf8fcf5e73"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.658553 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"960808d276428270960b45328ad20a1b8763d3776d3a465c2be2a56dd7c74bc7"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.660634 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.660978 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.16096405 +0000 UTC m=+152.332556391 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.663303 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b7fmr" event={"ID":"b962c13b-37be-46d3-91ab-5fde6214ff03","Type":"ContainerStarted","Data":"1491c1390d9e6e5b4b209acd97a7f26ea2aa48740b0be54229885da4e9fcd081"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.673885 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" podStartSLOduration=130.673866768 podStartE2EDuration="2m10.673866768s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.673803956 +0000 UTC m=+151.845396297" watchObservedRunningTime="2025-11-25 12:30:06.673866768 +0000 UTC m=+151.845459109" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.679131 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" event={"ID":"2351e3a8-bdfe-4a50-b234-fe4a84b82169","Type":"ContainerStarted","Data":"4361b0d8693bffccdd071d1b039f05ccacbd45b94609b766264a0c3282fc5746"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.679166 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" event={"ID":"2351e3a8-bdfe-4a50-b234-fe4a84b82169","Type":"ContainerStarted","Data":"bbb7762f57dcf4ac2920fb3b917495284fa66daec51f6fa3681c4addd80e2227"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.689892 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" event={"ID":"7a82f7da-19cc-4d67-8384-be35fad097ed","Type":"ContainerStarted","Data":"0b99407e0799ec07ce4a45ff933d939a28b6d6c87724fe5e007154768af98691"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.694946 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-r2pgw" event={"ID":"74086034-016c-4df6-bd1e-c4f99eb3edbe","Type":"ContainerStarted","Data":"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.710009 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-bwvrd" podStartSLOduration=130.709991804 podStartE2EDuration="2m10.709991804s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.702578562 +0000 UTC m=+151.874170913" watchObservedRunningTime="2025-11-25 12:30:06.709991804 +0000 UTC m=+151.881584145" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.718947 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" 
event={"ID":"62f92a86-663f-4101-9429-ffcd900bef67","Type":"ContainerStarted","Data":"803045f7492162e066e37fc0f4b3349bd5a92b5fcf22d4e6eb60a550673dd6ef"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.728177 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" event={"ID":"0af2c49d-affd-4abb-8910-5d9c717f5b78","Type":"ContainerStarted","Data":"fd3c791d89c5f153be44dad149070001ece5c559348672ae0d35d79a6808bb85"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.742150 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" event={"ID":"962d971d-f0de-4d22-a854-e4a65644b9b8","Type":"ContainerStarted","Data":"4bc9775eba76938af4529ded007527fa65dcc45121366766224e39f0ed2ae496"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.757212 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" event={"ID":"e248d293-d2aa-4bfc-a575-24529404e90c","Type":"ContainerStarted","Data":"640945be49ca7930c557ba7dc5035d48fe445973085cf843fb66189efc4589cd"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.761387 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.762190 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.262164469 +0000 UTC m=+152.433756810 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.770620 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6579r" event={"ID":"e3551b47-6cfc-4240-bb74-bf7cc0600a96","Type":"ContainerStarted","Data":"913ca97dad41943b65a3d7c0dfbf426b45e690f0370bcb2c4f5d875367e1f896"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.784474 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-86gc4" event={"ID":"ad85cac4-4f5d-4801-a73f-8c7c01806627","Type":"ContainerStarted","Data":"203a7f6c228eae4293e3c08945e49cbb090e2129ead1f392ef38301b901c5e34"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.784990 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.796553 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" event={"ID":"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8","Type":"ContainerStarted","Data":"9a6e701258fa78920778c22faa597b726dba56444ad7cf17c4ee19200c2bc415"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.808294 4675 generic.go:334] "Generic (PLEG): container finished" podID="4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa" containerID="3aca75e39850e81974cc84a19ae38433485a7fbe127336aec7c4dd0b49d1dee5" exitCode=0 Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.808415 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" event={"ID":"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa","Type":"ContainerDied","Data":"3aca75e39850e81974cc84a19ae38433485a7fbe127336aec7c4dd0b49d1dee5"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.809871 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-682hw" podStartSLOduration=130.809849859 podStartE2EDuration="2m10.809849859s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.730049335 +0000 UTC m=+151.901641686" watchObservedRunningTime="2025-11-25 12:30:06.809849859 +0000 UTC m=+151.981442220" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.810704 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zkcp6" podStartSLOduration=132.810693666 podStartE2EDuration="2m12.810693666s" podCreationTimestamp="2025-11-25 12:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.809250089 +0000 UTC m=+151.980842430" watchObservedRunningTime="2025-11-25 12:30:06.810693666 +0000 UTC m=+151.982286007" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.813263 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"1e56c39c64e5a1af043ad2a3b5b34f3e302a772338f972aadf6a1e855ecd417e"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.813308 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f246ae5edc1e52a1c9da679814907d9f96ca8adf14a06114e5d588b964f9e5e7"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.813842 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.830315 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" event={"ID":"f47c7634-aeac-48cb-aeaa-dc06956728ff","Type":"ContainerStarted","Data":"4c515af61af5f00143074a6eae2a160c227732b1619a986dacb4dadacff6d7b4"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.847478 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" event={"ID":"f94d5d6d-5e22-4aef-9fa9-b8c178d78454","Type":"ContainerStarted","Data":"9304fb4c0b2d975d8411a8cb9fcd41ec6934ec6a86a344b4573e8003cabff1cf"} Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.848615 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.848667 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.852568 4675 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9hj2h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.852887 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.864139 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.872943 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 12:30:07.372926078 +0000 UTC m=+152.544518419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.880723 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" Nov 25 12:30:06 crc kubenswrapper[4675]: I1125 12:30:06.969180 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:06 crc kubenswrapper[4675]: E1125 12:30:06.970608 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.470591772 +0000 UTC m=+152.642184113 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.074650 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.075147 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.575136091 +0000 UTC m=+152.746728432 (durationBeforeRetry 500ms). 
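[Editor's sketch] The repeating "No retries permitted until ... (durationBeforeRetry 500ms)" entries above come from the kubelet's pending-operation bookkeeping: a failed volume operation is stamped with a retry deadline, and any re-attempt before that deadline is refused. A minimal Go sketch of that gating pattern, assuming the constant 500ms delay shown in the log; all type and function names here are hypothetical, not the kubelet's actual nestedpendingoperations source:

package main

import (
	"fmt"
	"time"
)

// pendingOp mirrors the bookkeeping the log suggests: a failed operation
// records the error and when it may next be attempted.
type pendingOp struct {
	lastErr   error
	notBefore time.Time // "No retries permitted until ..."
}

type operationGate struct {
	durationBeforeRetry time.Duration // 500ms in the entries above
	ops                 map[string]*pendingOp
}

// run executes op unless a previous failure's retry deadline has not yet
// passed, in which case it refuses with a deadline error.
func (g *operationGate) run(key string, op func() error) error {
	if p, ok := g.ops[key]; ok && time.Now().Before(p.notBefore) {
		return fmt.Errorf("no retries permitted until %s: %v",
			p.notBefore.Format(time.RFC3339Nano), p.lastErr)
	}
	if err := op(); err != nil {
		g.ops[key] = &pendingOp{lastErr: err, notBefore: time.Now().Add(g.durationBeforeRetry)}
		return err
	}
	delete(g.ops, key)
	return nil
}

func main() {
	gate := &operationGate{durationBeforeRetry: 500 * time.Millisecond, ops: map[string]*pendingOp{}}
	mount := func() error {
		return fmt.Errorf("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	fmt.Println(gate.run("pvc-657094db", mount)) // first attempt: the CSI error itself
	fmt.Println(gate.run("pvc-657094db", mount)) // immediate retry: refused until deadline
	time.Sleep(600 * time.Millisecond)
	fmt.Println(gate.run("pvc-657094db", mount)) // past the deadline: attempted (and failing) again
}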
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.095701 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-f9zd9" podStartSLOduration=131.095684769 podStartE2EDuration="2m11.095684769s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:06.959903595 +0000 UTC m=+152.131495946" watchObservedRunningTime="2025-11-25 12:30:07.095684769 +0000 UTC m=+152.267277110" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.175660 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.175998 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.675984729 +0000 UTC m=+152.847577070 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.232277 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mqfhd" podStartSLOduration=132.232256868 podStartE2EDuration="2m12.232256868s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.096434654 +0000 UTC m=+152.268026995" watchObservedRunningTime="2025-11-25 12:30:07.232256868 +0000 UTC m=+152.403849209" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.267793 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:07 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:07 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:07 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.267876 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.277168 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.277680 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.777646203 +0000 UTC m=+152.949238544 (durationBeforeRetry 500ms). 
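[Editor's sketch] The router startup-probe body above ("[-]backend-http failed: reason withheld ... [+]process-running ok ... healthz check failed") is the aggregated-healthz output format: each named check reports pass or fail, failure details are withheld from the probe body, and any single failure turns the whole endpoint into an HTTP 500. A small illustrative reproduction of that output shape; the check names are copied from the log, but the pass/fail reasons are hypothetical:

package main

import (
	"errors"
	"fmt"
)

// check is one named health check as rendered in the probe body.
type check struct {
	name string
	err  error
}

func main() {
	checks := []check{
		{"backend-http", errors.New("backend not ready")},  // hypothetical reason
		{"has-synced", errors.New("initial sync pending")}, // hypothetical reason
		{"process-running", nil},
	}

	status := 200
	for _, c := range checks {
		if c.err != nil {
			status = 500
			// Details are withheld from the probe body, matching the log.
			fmt.Printf("[-]%s failed: reason withheld\n", c.name)
		} else {
			fmt.Printf("[+]%s ok\n", c.name)
		}
	}
	if status != 200 {
		fmt.Println("healthz check failed")
	}
	fmt.Println("HTTP status:", status) // the probe then reports "statuscode: 500"
}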
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.378236 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.378879 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.878863983 +0000 UTC m=+153.050456324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.453485 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-r2pgw" podStartSLOduration=132.453447047 podStartE2EDuration="2m12.453447047s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.362693547 +0000 UTC m=+152.534285908" watchObservedRunningTime="2025-11-25 12:30:07.453447047 +0000 UTC m=+152.625039408" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.482698 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.483499 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:07.983387159 +0000 UTC m=+153.154979500 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.532332 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4mv8m" podStartSLOduration=131.532291129 podStartE2EDuration="2m11.532291129s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.458203631 +0000 UTC m=+152.629795972" watchObservedRunningTime="2025-11-25 12:30:07.532291129 +0000 UTC m=+152.703883470" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.585467 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.585874 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.08585382 +0000 UTC m=+153.257446161 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.661107 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6k9qw" podStartSLOduration=131.661085736 podStartE2EDuration="2m11.661085736s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.659669779 +0000 UTC m=+152.831262120" watchObservedRunningTime="2025-11-25 12:30:07.661085736 +0000 UTC m=+152.832678077" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.687261 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.687650 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.187637038 +0000 UTC m=+153.359229379 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.712103 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cm2s5" podStartSLOduration=131.712083083 podStartE2EDuration="2m11.712083083s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.707479943 +0000 UTC m=+152.879072294" watchObservedRunningTime="2025-11-25 12:30:07.712083083 +0000 UTC m=+152.883675424" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.730632 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-wh2qp" podStartSLOduration=131.730608945 podStartE2EDuration="2m11.730608945s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.728171786 +0000 UTC m=+152.899764127" watchObservedRunningTime="2025-11-25 12:30:07.730608945 +0000 UTC m=+152.902201286" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.786047 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.788781 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.788911 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.288890999 +0000 UTC m=+153.460483340 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.789107 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.789405 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.289397926 +0000 UTC m=+153.460990267 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.795754 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v5q5t" podStartSLOduration=131.795735492 podStartE2EDuration="2m11.795735492s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.787968129 +0000 UTC m=+152.959560470" watchObservedRunningTime="2025-11-25 12:30:07.795735492 +0000 UTC m=+152.967327833" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.854201 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" event={"ID":"4da7f1b1-e92f-41ae-8f2e-9d265b32d0aa","Type":"ContainerStarted","Data":"b7e86091c40a0922d4e4b0e4f8199013c9437d1f15f31d1920ff7b34c0cf9b1d"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.855827 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" event={"ID":"c4a9d2d1-a49d-4103-9791-e5e8e7a04392","Type":"ContainerStarted","Data":"7aa8724a2a39da85b993ea645cf64f6ee495cd14718cd9bdf0c80ec7d40efc2b"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.858567 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" event={"ID":"62f92a86-663f-4101-9429-ffcd900bef67","Type":"ContainerStarted","Data":"0c181574190dfc3ce01376bd406375f421e4a9d65befa390370b1e02a448edf3"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.862022 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" 
event={"ID":"80763c3d-5c23-4a04-83ed-328b856d84e8","Type":"ContainerStarted","Data":"4cf340212237a3412b17459ed2dadbd6048c5166e7ff4be009c3d3eeb2fe8352"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.863580 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-86gc4" event={"ID":"ad85cac4-4f5d-4801-a73f-8c7c01806627","Type":"ContainerStarted","Data":"a51d029c6f6d3dd3d96af953e6bbed0e3b41d3fca1e54f331f833a1821770e94"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.868952 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" event={"ID":"b3bd8a07-24ba-4e2e-9c0c-ec6fce2ddff8","Type":"ContainerStarted","Data":"f3b7c70554c64ebe6b8a7375f7f66473304eeacecce869fc01dc24b893042f54"} Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870444 4675 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-kcdzn container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body= Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870479 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" podUID="79030fb7-fcf1-4404-a300-e8c12becbb9b" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870525 4675 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-d4t62 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870557 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62" podUID="d3e50a3c-8b55-437d-a426-45594219a120" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870600 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.870612 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.890287 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.890478 4675 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.39044953 +0000 UTC m=+153.562041871 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.890560 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.890909 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.390898335 +0000 UTC m=+153.562490746 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.891682 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.972601 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4jwnj" Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.992215 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.992335 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.49231472 +0000 UTC m=+153.663907061 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:07 crc kubenswrapper[4675]: I1125 12:30:07.994022 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:07 crc kubenswrapper[4675]: E1125 12:30:07.995911 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.495896487 +0000 UTC m=+153.667488908 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.070587 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-86gc4" podStartSLOduration=11.070572035 podStartE2EDuration="11.070572035s" podCreationTimestamp="2025-11-25 12:29:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.066235613 +0000 UTC m=+153.237827954" watchObservedRunningTime="2025-11-25 12:30:08.070572035 +0000 UTC m=+153.242164376" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.071332 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-6579r" podStartSLOduration=10.071328029 podStartE2EDuration="10.071328029s" podCreationTimestamp="2025-11-25 12:29:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:07.969857052 +0000 UTC m=+153.141449403" watchObservedRunningTime="2025-11-25 12:30:08.071328029 +0000 UTC m=+153.242920370" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.109857 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nftsb" podStartSLOduration=132.109837401 podStartE2EDuration="2m12.109837401s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.107845876 +0000 UTC m=+153.279438207" watchObservedRunningTime="2025-11-25 12:30:08.109837401 +0000 UTC m=+153.281429742" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 
12:30:08.110619 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.110753 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.610727799 +0000 UTC m=+153.782320140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.110937 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.111236 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.611228866 +0000 UTC m=+153.782821207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.211910 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.212327 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.712307961 +0000 UTC m=+153.883900302 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.212421 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.212916 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.7128999 +0000 UTC m=+153.884492231 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.220970 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-vmnz8" podStartSLOduration=132.220953932 podStartE2EDuration="2m12.220953932s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.218956818 +0000 UTC m=+153.390549159" watchObservedRunningTime="2025-11-25 12:30:08.220953932 +0000 UTC m=+153.392546263" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.265397 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:08 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:08 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:08 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.265459 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.313367 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 
crc kubenswrapper[4675]: E1125 12:30:08.313492 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.813467149 +0000 UTC m=+153.985059490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.313542 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.313951 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.813935774 +0000 UTC m=+153.985528115 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.403705 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" podStartSLOduration=132.403686342 podStartE2EDuration="2m12.403686342s" podCreationTimestamp="2025-11-25 12:27:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.403080582 +0000 UTC m=+153.574672933" watchObservedRunningTime="2025-11-25 12:30:08.403686342 +0000 UTC m=+153.575278683" Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.414147 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.414292 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.914263185 +0000 UTC m=+154.085855526 (durationBeforeRetry 500ms). 
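[Editor's sketch] In the pod_startup_latency_tracker entries above, podStartE2EDuration appears to be watchObservedRunningTime minus podCreationTimestamp (the pull timestamps are the zero time because no image pull was needed). Taking the apiserver-7bbb656c7d-9n5x8 entry as a worked example, 12:30:08.403686342 minus 12:27:56 is 2m12.403686342s, i.e. podStartSLOduration=132.403686342. A small Go check of that arithmetic, with timestamps copied from the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps from the apiserver-7bbb656c7d-9n5x8 entry above; parse
	// errors are ignored here only because the inputs are fixed strings.
	created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST",
		"2025-11-25 12:27:56 +0000 UTC")
	observed, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
		"2025-11-25 12:30:08.403686342 +0000 UTC")

	// podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp
	fmt.Println(observed.Sub(created)) // 2m12.403686342s
}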
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.414453 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.414777 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:08.914767352 +0000 UTC m=+154.086359783 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.515414 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.515574 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.015550587 +0000 UTC m=+154.187142928 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.515856 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.516272 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.016252679 +0000 UTC m=+154.187845020 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.616639 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.616783 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.116759006 +0000 UTC m=+154.288351347 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.616979 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.617405 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.117372056 +0000 UTC m=+154.288964407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.717843 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.718019 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.218002837 +0000 UTC m=+154.389595178 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.718104 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.718338 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.218332108 +0000 UTC m=+154.389924449 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.818709 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.818890 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.318866505 +0000 UTC m=+154.490458846 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.818930 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.819230 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.319223177 +0000 UTC m=+154.490815518 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.832105 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-5sbb6" podStartSLOduration=133.832081895 podStartE2EDuration="2m13.832081895s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.657282794 +0000 UTC m=+153.828875125" watchObservedRunningTime="2025-11-25 12:30:08.832081895 +0000 UTC m=+154.003674236"
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.833056 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" podStartSLOduration=133.833046726 podStartE2EDuration="2m13.833046726s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:08.828720616 +0000 UTC m=+154.000312967" watchObservedRunningTime="2025-11-25 12:30:08.833046726 +0000 UTC m=+154.004639067"
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.878978 4675 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-rgkhp container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body=
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.879047 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" podUID="25770358-1610-4bfe-bff7-4acf81e687e8" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused"
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.880460 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-d4t62"
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.919545 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.919729 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.419701012 +0000 UTC m=+154.591293353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:08 crc kubenswrapper[4675]: I1125 12:30:08.921960 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:08 crc kubenswrapper[4675]: E1125 12:30:08.922188 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.422176473 +0000 UTC m=+154.593768814 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.022711 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.022892 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.522863155 +0000 UTC m=+154.694455496 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.023298 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.024167 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.524157848 +0000 UTC m=+154.695750189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.124281 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.124465 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.624435427 +0000 UTC m=+154.796027778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.124639 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.125223 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.625214172 +0000 UTC m=+154.796806513 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.225163 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.225320 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.725296185 +0000 UTC m=+154.896888526 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.225404 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.225882 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.725866354 +0000 UTC m=+154.897458695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.260322 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:09 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:09 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:09 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.260380 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.326129 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.326352 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.826323348 +0000 UTC m=+154.997915689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.326467 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.326841 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.826826875 +0000 UTC m=+154.998419216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.427575 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.427939 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:09.927920161 +0000 UTC m=+155.099512512 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.529840 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.530237 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.030225415 +0000 UTC m=+155.201817756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.632234 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.632344 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.132319644 +0000 UTC m=+155.303911985 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.632562 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.632867 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.132853982 +0000 UTC m=+155.304446323 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.669124 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.669737 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.691743 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.691936 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.733406 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.733583 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.233555245 +0000 UTC m=+155.405147586 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.733611 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.733644 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.733673 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.733983 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.233971427 +0000 UTC m=+155.405563768 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.756373 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"]
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.762548 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.767194 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.789312 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.805605 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"]
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.836738 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.837077 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.837112 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.837617 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.337586135 +0000 UTC m=+155.509178476 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.837650 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.888330 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.888437 4675 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-kcdzn container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.888475 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" podUID="79030fb7-fcf1-4404-a300-e8c12becbb9b" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.909346 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" event={"ID":"c4a9d2d1-a49d-4103-9791-e5e8e7a04392","Type":"ContainerStarted","Data":"facc5a26ac4bdd83366278eaf57a383f08e013f6117081e4884bc2b4c594914c"}
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.928085 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p265m"]
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.930083 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.938390 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.938458 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.938491 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.938532 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxk82\" (UniqueName: \"kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:09 crc kubenswrapper[4675]: E1125 12:30:09.938888 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.438872967 +0000 UTC m=+155.610465308 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.942221 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.970506 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh"
Nov 25 12:30:09 crc kubenswrapper[4675]: I1125 12:30:09.987276 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040047 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.040189 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.540163109 +0000 UTC m=+155.711755450 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040236 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040288 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040337 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040380 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxk82\" (UniqueName: \"kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040425 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6495s\" (UniqueName: \"kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040501 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.040971 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.041445 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.044228 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.544211281 +0000 UTC m=+155.715803622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.051960 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p265m"]
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.141589 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.141803 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.641777722 +0000 UTC m=+155.813370063 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.141969 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6495s\" (UniqueName: \"kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.142070 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.142161 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.142198 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.142593 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.642576708 +0000 UTC m=+155.814169049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.142715 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.142889 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.154581 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"]
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.155470 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.183518 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"]
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.211678 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxk82\" (UniqueName: \"kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82\") pod \"certified-operators-mh8rn\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.218189 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6495s\" (UniqueName: \"kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s\") pod \"community-operators-p265m\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.243958 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.244303 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.744287354 +0000 UTC m=+155.915879695 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.270040 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:10 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:10 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:10 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.270088 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.294587 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.310617 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z97cr"]
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.311484 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.345018 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.345107 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.345147 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.345180 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7czjd\" (UniqueName: \"kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.345529 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.845514664 +0000 UTC m=+156.017107015 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.372645 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z97cr"]
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.391714 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.446689 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.446921 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.446950 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.446973 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.446994 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skpvl\" (UniqueName: \"kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.447013 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7czjd\" (UniqueName: \"kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.447042 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.447137 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:10.947123557 +0000 UTC m=+156.118715888 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.447481 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.447680 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.481654 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7czjd\" (UniqueName: \"kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd\") pod \"certified-operators-pfcfp\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.550551 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.550596 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skpvl\" (UniqueName: \"kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.550626 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.550662 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.550986 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.050974452 +0000 UTC m=+156.222566793 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.551428 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.551952 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.633896 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skpvl\" (UniqueName: \"kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl\") pod \"community-operators-z97cr\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") " pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.655388 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.655717 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.155701835 +0000 UTC m=+156.327294176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.756915 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.757422 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.257382551 +0000 UTC m=+156.428974972 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.775651 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.865154 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.865552 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.365533575 +0000 UTC m=+156.537125916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.928996 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.946296 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" event={"ID":"c4a9d2d1-a49d-4103-9791-e5e8e7a04392","Type":"ContainerStarted","Data":"605203f785b43a1d124381a7ab7b6900a7fc2389e8d1c0541f7b0d59f1773828"}
Nov 25 12:30:10 crc kubenswrapper[4675]: I1125 12:30:10.970038 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:10 crc kubenswrapper[4675]: E1125 12:30:10.970463 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.470427345 +0000 UTC m=+156.642019686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.080667 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.081194 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.581168494 +0000 UTC m=+156.752760835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.182779 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.183159 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.683142899 +0000 UTC m=+156.854735240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.260085 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:11 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:11 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:11 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.260144 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.283879 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.284094 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.784063869 +0000 UTC m=+156.955656210 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.284382 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.284708 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.784697389 +0000 UTC m=+156.956289730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.342019 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 12:30:11 crc kubenswrapper[4675]: W1125 12:30:11.369013 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod79922123_d9af_46f9_9e27_22877fd78f32.slice/crio-7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c WatchSource:0}: Error finding container 7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c: Status 404 returned error can't find the container with id 7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.374659 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p265m"] Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.384871 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.384971 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.884950978 +0000 UTC m=+157.056543319 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.385113 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.385405 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.885397891 +0000 UTC m=+157.056990232 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.485760 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.486162 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:11.986146697 +0000 UTC m=+157.157739038 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.587111 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.587560 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.087534152 +0000 UTC m=+157.259126493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.703151 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.703416 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.203396558 +0000 UTC m=+157.374988899 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.703603 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.703999 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.203991107 +0000 UTC m=+157.375583448 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.719900 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"] Nov 25 12:30:11 crc kubenswrapper[4675]: W1125 12:30:11.753443 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb14b88f1_8208_4dcb_87e1_ab5ebaca22f0.slice/crio-ce79865f8be9517619c6e951afc06478bdbacde5bd8772182ff560af9d094872 WatchSource:0}: Error finding container ce79865f8be9517619c6e951afc06478bdbacde5bd8772182ff560af9d094872: Status 404 returned error can't find the container with id ce79865f8be9517619c6e951afc06478bdbacde5bd8772182ff560af9d094872 Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.805235 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.805569 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.305550427 +0000 UTC m=+157.477142768 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.805639 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.805921 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.305913069 +0000 UTC m=+157.477505410 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.907673 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:11 crc kubenswrapper[4675]: E1125 12:30:11.908035 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.408018238 +0000 UTC m=+157.579610579 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.936735 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.936983 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.948538 4675 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-rgkhp container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.948557 4675 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-rgkhp container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.948592 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" podUID="25770358-1610-4bfe-bff7-4acf81e687e8" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.948592 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" podUID="25770358-1610-4bfe-bff7-4acf81e687e8" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.958320 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerStarted","Data":"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5"} Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.958359 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerStarted","Data":"3b1aba78f8f891a560ef2bd6dc0e71a6f855c87be697e46854cf1832fdc599b6"} Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.960358 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" event={"ID":"c4a9d2d1-a49d-4103-9791-e5e8e7a04392","Type":"ContainerStarted","Data":"e49e9a0362bde966518b4370cbe19ae153cc9ec6b7910960305ee297004d325f"} Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.970130 4675 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79922123-d9af-46f9-9e27-22877fd78f32","Type":"ContainerStarted","Data":"7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c"} Nov 25 12:30:11 crc kubenswrapper[4675]: I1125 12:30:11.974684 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerStarted","Data":"ce79865f8be9517619c6e951afc06478bdbacde5bd8772182ff560af9d094872"} Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.006653 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.007995 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.008877 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.009169 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.509157706 +0000 UTC m=+157.680750047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.017720 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.017785 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.017898 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.017936 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: 
connection refused" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.042126 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.044150 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.061896 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z97cr"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.109090 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.109612 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.109898 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg7tr\" (UniqueName: \"kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.109941 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.110014 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.111188 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.61116573 +0000 UTC m=+157.782758071 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.123623 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-7dbvr" podStartSLOduration=14.123607335 podStartE2EDuration="14.123607335s" podCreationTimestamp="2025-11-25 12:29:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:12.120551766 +0000 UTC m=+157.292144097" watchObservedRunningTime="2025-11-25 12:30:12.123607335 +0000 UTC m=+157.295199676" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.214474 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.214539 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.214591 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg7tr\" (UniqueName: \"kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.214628 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.214904 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.714892082 +0000 UTC m=+157.886484423 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.215109 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.215417 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.257920 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kcdzn" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.258585 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.261101 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg7tr\" (UniqueName: \"kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr\") pod \"redhat-marketplace-n27dz\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.269250 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:12 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:12 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:12 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.269307 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.315268 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.315695 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 12:30:12.815680388 +0000 UTC m=+157.987272729 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.318931 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.319917 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.329776 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.373152 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.373444 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.380732 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.391484 4675 patch_prober.go:28] interesting pod/console-f9d7485db-r2pgw container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.25:8443/health\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.391539 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-r2pgw" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerName="console" probeResult="failure" output="Get \"https://10.217.0.25:8443/health\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.416523 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.418289 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:12.918268872 +0000 UTC m=+158.089861313 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.481292 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.481344 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.499270 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.517648 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.518044 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.518085 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s24rn\" (UniqueName: \"kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.518173 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.518498 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.018477339 +0000 UTC m=+158.190069680 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.631531 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.631578 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.631612 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s24rn\" (UniqueName: \"kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.631673 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.632133 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.632900 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.633308 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.133293951 +0000 UTC m=+158.304886292 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.699095 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s24rn\" (UniqueName: \"kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn\") pod \"redhat-marketplace-9pcmx\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.737072 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.737298 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.237282351 +0000 UTC m=+158.408874692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.838399 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.838724 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.338707827 +0000 UTC m=+158.510300158 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.939525 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.939729 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.439705799 +0000 UTC m=+158.611298140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.939846 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:12 crc kubenswrapper[4675]: E1125 12:30:12.940373 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.44033834 +0000 UTC m=+158.611930681 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.970554 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.993416 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"] Nov 25 12:30:12 crc kubenswrapper[4675]: I1125 12:30:12.994520 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.000355 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.020904 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"] Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.028837 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79922123-d9af-46f9-9e27-22877fd78f32","Type":"ContainerStarted","Data":"e29fc1eb9b22668c13c865c627a8828044305b665fd0be8f3256a5574b001a70"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.044239 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.044503 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.044540 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.044709 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.544689792 +0000 UTC m=+158.716282143 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.044772 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkg6n\" (UniqueName: \"kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.051637 4675 generic.go:334] "Generic (PLEG): container finished" podID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerID="f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4" exitCode=0 Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.051749 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerDied","Data":"f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.051777 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerStarted","Data":"ab3461a32f5150a07cde08e3ea60ac875103c3f955de9e173ae54f123944079f"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.056400 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.071052 4675 generic.go:334] "Generic (PLEG): container finished" podID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerID="09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca" exitCode=0 Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.071129 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerDied","Data":"09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.081169 4675 generic.go:334] "Generic (PLEG): container finished" podID="1f8affce-79eb-41ed-bea6-befbdd902706" containerID="9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5" exitCode=0 Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.081266 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerDied","Data":"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.103140 4675 generic.go:334] "Generic (PLEG): container finished" podID="9a185f42-001e-458c-a598-246cd31ac0a3" containerID="539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26" exitCode=0 Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.104737 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" 
event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerDied","Data":"539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.104770 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerStarted","Data":"78de9d1f48ac3e8518ec84c3c64cb68f296eb98644bb95343bcd79d0d7935414"} Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.111897 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9n5x8" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.151591 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.151640 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.151679 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.151802 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkg6n\" (UniqueName: \"kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.152934 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.652919349 +0000 UTC m=+158.824511690 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.153119 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.153586 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.169777 4675 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.228703 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkg6n\" (UniqueName: \"kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n\") pod \"redhat-operators-xfjg4\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.252488 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.252766 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.752753134 +0000 UTC m=+158.924345475 (durationBeforeRetry 500ms). 
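The repeated "(durationBeforeRetry 500ms)" failures above are kubelet pacing retries of the same volume operation while the CSI driver is still unregistered. A minimal sketch of that pacing follows; only the 500ms initial delay is taken from the log, while the doubling factor and the cap are assumptions, not kubelet constants.

```go
// Hedged sketch of per-operation retry backoff, as suggested by the
// "No retries permitted until ... (durationBeforeRetry 500ms)" lines.
package main

import (
	"fmt"
	"time"
)

const (
	initialDelay = 500 * time.Millisecond       // matches the log lines
	factor       = 2                            // assumed growth factor
	maxDelay     = 2*time.Minute + 2*time.Second // assumed cap
)

type backoff struct{ last time.Duration }

// next records a failure and returns the earliest permitted retry time.
func (b *backoff) next(now time.Time) time.Time {
	if b.last == 0 {
		b.last = initialDelay
	} else if b.last *= factor; b.last > maxDelay {
		b.last = maxDelay
	}
	return now.Add(b.last)
}

func main() {
	var b backoff
	now := time.Now()
	for i := 1; i <= 4; i++ {
		fmt.Printf("attempt %d: no retries permitted until %s\n", i, b.next(now).Format(time.StampMilli))
	}
}
```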
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.260915 4675 patch_prober.go:28] interesting pod/apiserver-76f77b778f-j2gbr container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]log ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]etcd ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/max-in-flight-filter ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 25 12:30:13 crc kubenswrapper[4675]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [-]poststarthook/openshift.io-startinformers failed: reason withheld
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 12:30:13 crc kubenswrapper[4675]: livez check failed
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.260982 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" podUID="62f92a86-663f-4101-9429-ffcd900bef67" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.266941 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:13 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:13 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:13 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.266988 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.302548 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"]
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.315000 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"]
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.316052 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.341902 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"]
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.358261 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.358667 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.858653697 +0000 UTC m=+159.030246038 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.397918 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfjg4"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.459508 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.459720 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.459742 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.459829 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52qhc\" (UniqueName: \"kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.460220 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:13.960201306 +0000 UTC m=+159.131793647 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.565382 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.565730 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.565871 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52qhc\" (UniqueName: \"kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.565938 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.566298 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.566289 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:14.066274634 +0000 UTC m=+159.237866975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.566305 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.587742 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52qhc\" (UniqueName: \"kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc\") pod \"redhat-operators-h54bl\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.659927 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.665007 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.665053 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.668270 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.668627 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 12:30:14.168612581 +0000 UTC m=+159.340204922 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.720834 4675 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T12:30:13.1698581Z","Handler":null,"Name":""}
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.770857 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:13 crc kubenswrapper[4675]: E1125 12:30:13.771209 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 12:30:14.271197325 +0000 UTC m=+159.442789666 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-444qp" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.787313 4675 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.787353 4675 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.813272 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"]
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.840805 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.841779 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
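The plugin_watcher.go -> "OperationExecutor.RegisterPlugin started" -> csi_plugin.go sequence above is the driver-registration path that eventually clears the "not found in the list of registered CSI drivers" errors. A sketch of the directory-watching half of that flow, assuming fsnotify-style events; this is an illustration of the mechanism, not kubelet's actual implementation.

```go
// Hedged sketch: watch the plugin-registry directory for new *.sock files
// and hand them to a registration step. Uses github.com/fsnotify/fsnotify;
// the directory constant mirrors the log, the rest is assumed.
package main

import (
	"log"
	"strings"

	"github.com/fsnotify/fsnotify"
)

const pluginDir = "/var/lib/kubelet/plugins_registry"

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add(pluginDir); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-watcher.Events:
			// A driver announces itself by creating a registration socket.
			if ev.Op&fsnotify.Create != 0 && strings.HasSuffix(ev.Name, ".sock") {
				log.Printf("Adding socket path to desired state cache: %s", ev.Name)
				// The real kubelet then dials this socket over gRPC to fetch
				// the driver's name/endpoint and registers it, after which
				// pending CSI mounts can finally find the driver.
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}
```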
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:13 crc kubenswrapper[4675]: W1125 12:30:13.844138 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf14072ad_cddd_4f39_ada4_c97e85dd22f7.slice/crio-a295633649da88cccff0db91ce215d5081e17641563c2734c23707519ce4fc47 WatchSource:0}: Error finding container a295633649da88cccff0db91ce215d5081e17641563c2734c23707519ce4fc47: Status 404 returned error can't find the container with id a295633649da88cccff0db91ce215d5081e17641563c2734c23707519ce4fc47 Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.844176 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.858136 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.873327 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.889523 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.890046 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.890087 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.897340 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.917779 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"] Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.952312 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-rgkhp" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.991701 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.991835 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.991855 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:13 crc kubenswrapper[4675]: I1125 12:30:13.991921 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.007833 4675 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.007877 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.033230 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.137543 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerStarted","Data":"5d0d32ddabc6c41def5331083b2efaed6be7d47ce896721aad7521d111dd6063"}
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.156397 4675 generic.go:334] "Generic (PLEG): container finished" podID="79922123-d9af-46f9-9e27-22877fd78f32" containerID="e29fc1eb9b22668c13c865c627a8828044305b665fd0be8f3256a5574b001a70" exitCode=0
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.156469 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79922123-d9af-46f9-9e27-22877fd78f32","Type":"ContainerDied","Data":"e29fc1eb9b22668c13c865c627a8828044305b665fd0be8f3256a5574b001a70"}
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.165340 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-444qp\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") " pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.171007 4675 generic.go:334] "Generic (PLEG): container finished" podID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerID="008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348" exitCode=0
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.171066 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerDied","Data":"008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348"}
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.171091 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerStarted","Data":"8bd28f06bee488f32bf464d4ce24c41cebfc1b450a53a6d79a1495e55c6d902a"}
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.178144 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerStarted","Data":"a295633649da88cccff0db91ce215d5081e17641563c2734c23707519ce4fc47"}
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.185311 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.242042 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"]
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.267175 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:14 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:14 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:14 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.267225 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:14 crc kubenswrapper[4675]: W1125 12:30:14.269043 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf25b8a59_cb5c_47eb_a2dc_7d5c0f62ee0e.slice/crio-7fac87bbb0f0aa7218fa5301e300016bfd894ef005ba916a43d71df1299985af WatchSource:0}: Error finding container 7fac87bbb0f0aa7218fa5301e300016bfd894ef005ba916a43d71df1299985af: Status 404 returned error can't find the container with id 7fac87bbb0f0aa7218fa5301e300016bfd894ef005ba916a43d71df1299985af
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.282364 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.552599 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.578379 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.618378 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access\") pod \"79922123-d9af-46f9-9e27-22877fd78f32\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") "
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.618511 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir\") pod \"79922123-d9af-46f9-9e27-22877fd78f32\" (UID: \"79922123-d9af-46f9-9e27-22877fd78f32\") "
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.618888 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "79922123-d9af-46f9-9e27-22877fd78f32" (UID: "79922123-d9af-46f9-9e27-22877fd78f32"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.626205 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "79922123-d9af-46f9-9e27-22877fd78f32" (UID: "79922123-d9af-46f9-9e27-22877fd78f32"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.671664 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"]
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.720217 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79922123-d9af-46f9-9e27-22877fd78f32-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 12:30:14 crc kubenswrapper[4675]: I1125 12:30:14.720449 4675 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79922123-d9af-46f9-9e27-22877fd78f32-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 12:30:14 crc kubenswrapper[4675]: W1125 12:30:14.722551 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod479f7d01_fd35_4d08_9477_70efd86ed7f0.slice/crio-383772e43310e70cd4b9e2a349dd6515103f957e3ded5c0352c11bcf0112f3ad WatchSource:0}: Error finding container 383772e43310e70cd4b9e2a349dd6515103f957e3ded5c0352c11bcf0112f3ad: Status 404 returned error can't find the container with id 383772e43310e70cd4b9e2a349dd6515103f957e3ded5c0352c11bcf0112f3ad
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.202473 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79922123-d9af-46f9-9e27-22877fd78f32","Type":"ContainerDied","Data":"7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.202517 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e252cdc6c9aadc4a22de8812b8abbf34ffaf617690d4a053e6bf89a66ae3b8c"
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.202588 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.210584 4675 generic.go:334] "Generic (PLEG): container finished" podID="2c198829-c4dc-459f-b29a-e705390ef9eb" containerID="0ef735e5c248dfbecefa8320d20ae1aaa94227042eecb2ed966a55f462e1b135" exitCode=0
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.210648 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" event={"ID":"2c198829-c4dc-459f-b29a-e705390ef9eb","Type":"ContainerDied","Data":"0ef735e5c248dfbecefa8320d20ae1aaa94227042eecb2ed966a55f462e1b135"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.215500 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"37c11c87-b250-414b-8c1c-faad34d76f26","Type":"ContainerStarted","Data":"9364acb91cf632759c7f3b0d071cc9f597a52f46fff735b9c41ffa62b05e1518"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.220843 4675 generic.go:334] "Generic (PLEG): container finished" podID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerID="2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77" exitCode=0
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.220889 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerDied","Data":"2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.220955 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerStarted","Data":"7fac87bbb0f0aa7218fa5301e300016bfd894ef005ba916a43d71df1299985af"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.222609 4675 generic.go:334] "Generic (PLEG): container finished" podID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerID="d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395" exitCode=0
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.222661 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerDied","Data":"d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.240167 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" event={"ID":"479f7d01-fd35-4d08-9477-70efd86ed7f0","Type":"ContainerStarted","Data":"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.240236 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" event={"ID":"479f7d01-fd35-4d08-9477-70efd86ed7f0","Type":"ContainerStarted","Data":"383772e43310e70cd4b9e2a349dd6515103f957e3ded5c0352c11bcf0112f3ad"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.250740 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.257843 4675 generic.go:334] "Generic (PLEG): container finished" podID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerID="564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217" exitCode=0
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.257883 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerDied","Data":"564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217"}
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.263151 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:15 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:15 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:15 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.263213 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.333159 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" podStartSLOduration=140.333137 podStartE2EDuration="2m20.333137s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:15.323643312 +0000 UTC m=+160.495235673" watchObservedRunningTime="2025-11-25 12:30:15.333137 +0000 UTC m=+160.504729361"
Nov 25 12:30:15 crc kubenswrapper[4675]: I1125 12:30:15.564971 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.261148 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 12:30:16 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld
Nov 25 12:30:16 crc kubenswrapper[4675]: [+]process-running ok
Nov 25 12:30:16 crc kubenswrapper[4675]: healthz check failed
Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.261206 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.284759 4675 generic.go:334] "Generic (PLEG): container finished" podID="37c11c87-b250-414b-8c1c-faad34d76f26" containerID="2557a96dbd7ef70bf29bc575cfeda85f7d9e062381f45ce30036c02c82257246" exitCode=0
Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.286188 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"37c11c87-b250-414b-8c1c-faad34d76f26","Type":"ContainerDied","Data":"2557a96dbd7ef70bf29bc575cfeda85f7d9e062381f45ce30036c02c82257246"}
Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.673632 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.761422 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume\") pod \"2c198829-c4dc-459f-b29a-e705390ef9eb\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.761943 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7dfv\" (UniqueName: \"kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv\") pod \"2c198829-c4dc-459f-b29a-e705390ef9eb\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.762037 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume\") pod \"2c198829-c4dc-459f-b29a-e705390ef9eb\" (UID: \"2c198829-c4dc-459f-b29a-e705390ef9eb\") " Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.762374 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume" (OuterVolumeSpecName: "config-volume") pod "2c198829-c4dc-459f-b29a-e705390ef9eb" (UID: "2c198829-c4dc-459f-b29a-e705390ef9eb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.786862 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv" (OuterVolumeSpecName: "kube-api-access-w7dfv") pod "2c198829-c4dc-459f-b29a-e705390ef9eb" (UID: "2c198829-c4dc-459f-b29a-e705390ef9eb"). InnerVolumeSpecName "kube-api-access-w7dfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.787289 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2c198829-c4dc-459f-b29a-e705390ef9eb" (UID: "2c198829-c4dc-459f-b29a-e705390ef9eb"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.863336 4675 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2c198829-c4dc-459f-b29a-e705390ef9eb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.863377 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2c198829-c4dc-459f-b29a-e705390ef9eb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.863394 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7dfv\" (UniqueName: \"kubernetes.io/projected/2c198829-c4dc-459f-b29a-e705390ef9eb-kube-api-access-w7dfv\") on node \"crc\" DevicePath \"\"" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.932352 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:16 crc kubenswrapper[4675]: I1125 12:30:16.937305 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-j2gbr" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.264935 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:17 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:17 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:17 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.264991 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.278538 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-86gc4" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.391142 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.393554 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg" event={"ID":"2c198829-c4dc-459f-b29a-e705390ef9eb","Type":"ContainerDied","Data":"7dca8cc42bc0efa037ccc00d234992b51ea334af8503f213c3796b8ef28b022d"} Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.393604 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dca8cc42bc0efa037ccc00d234992b51ea334af8503f213c3796b8ef28b022d" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.606889 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.614718 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/134942f4-79a7-4b14-9f21-ae027d146b44-metrics-certs\") pod \"network-metrics-daemon-whffq\" (UID: \"134942f4-79a7-4b14-9f21-ae027d146b44\") " pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.743592 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-whffq" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.798131 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.923958 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir\") pod \"37c11c87-b250-414b-8c1c-faad34d76f26\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.924126 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access\") pod \"37c11c87-b250-414b-8c1c-faad34d76f26\" (UID: \"37c11c87-b250-414b-8c1c-faad34d76f26\") " Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.924224 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "37c11c87-b250-414b-8c1c-faad34d76f26" (UID: "37c11c87-b250-414b-8c1c-faad34d76f26"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:30:17 crc kubenswrapper[4675]: I1125 12:30:17.928668 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "37c11c87-b250-414b-8c1c-faad34d76f26" (UID: "37c11c87-b250-414b-8c1c-faad34d76f26"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.025849 4675 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37c11c87-b250-414b-8c1c-faad34d76f26-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.025890 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37c11c87-b250-414b-8c1c-faad34d76f26-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.260478 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:18 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:18 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:18 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.260539 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.345044 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-whffq"] Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.407575 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"37c11c87-b250-414b-8c1c-faad34d76f26","Type":"ContainerDied","Data":"9364acb91cf632759c7f3b0d071cc9f597a52f46fff735b9c41ffa62b05e1518"} Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.407613 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.407623 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9364acb91cf632759c7f3b0d071cc9f597a52f46fff735b9c41ffa62b05e1518" Nov 25 12:30:18 crc kubenswrapper[4675]: I1125 12:30:18.434922 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-whffq" event={"ID":"134942f4-79a7-4b14-9f21-ae027d146b44","Type":"ContainerStarted","Data":"cadc9db18a56cef0ed0cf116032231b1ded98cc92e20ec1f35555cc8ea98aaeb"} Nov 25 12:30:19 crc kubenswrapper[4675]: I1125 12:30:19.259524 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:19 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:19 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:19 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:19 crc kubenswrapper[4675]: I1125 12:30:19.259837 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:19 crc kubenswrapper[4675]: I1125 12:30:19.524726 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-whffq" event={"ID":"134942f4-79a7-4b14-9f21-ae027d146b44","Type":"ContainerStarted","Data":"3d48b77a8cb9496028e013993dd843f6b5e17420b424e79045cf77fdd700eca8"} Nov 25 12:30:20 crc kubenswrapper[4675]: I1125 12:30:20.260640 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:20 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:20 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:20 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:20 crc kubenswrapper[4675]: I1125 12:30:20.260702 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:21 crc kubenswrapper[4675]: I1125 12:30:21.259623 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:21 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:21 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:21 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:21 crc kubenswrapper[4675]: I1125 12:30:21.259974 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:21 crc kubenswrapper[4675]: I1125 12:30:21.577956 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/network-metrics-daemon-whffq" event={"ID":"134942f4-79a7-4b14-9f21-ae027d146b44","Type":"ContainerStarted","Data":"8df3c98bcbf030e02ca0755158c4bc50cfce8ecbe8c12f0ea3981f048f3cb1d6"} Nov 25 12:30:21 crc kubenswrapper[4675]: I1125 12:30:21.593671 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-whffq" podStartSLOduration=146.593652278 podStartE2EDuration="2m26.593652278s" podCreationTimestamp="2025-11-25 12:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:30:21.593357968 +0000 UTC m=+166.764950309" watchObservedRunningTime="2025-11-25 12:30:21.593652278 +0000 UTC m=+166.765244629" Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.018361 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.018428 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.018940 4675 patch_prober.go:28] interesting pod/downloads-7954f5f757-f78nf container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.018997 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-f78nf" podUID="e76ec096-df5f-4aa6-93d4-e2824b4dc454" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.272560 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:22 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:22 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:22 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.272687 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.370617 4675 patch_prober.go:28] interesting pod/console-f9d7485db-r2pgw container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.25:8443/health\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 25 12:30:22 crc kubenswrapper[4675]: I1125 12:30:22.370711 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-r2pgw" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" 
containerName="console" probeResult="failure" output="Get \"https://10.217.0.25:8443/health\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 25 12:30:23 crc kubenswrapper[4675]: I1125 12:30:23.259693 4675 patch_prober.go:28] interesting pod/router-default-5444994796-ztlr8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 12:30:23 crc kubenswrapper[4675]: [-]has-synced failed: reason withheld Nov 25 12:30:23 crc kubenswrapper[4675]: [+]process-running ok Nov 25 12:30:23 crc kubenswrapper[4675]: healthz check failed Nov 25 12:30:23 crc kubenswrapper[4675]: I1125 12:30:23.259750 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ztlr8" podUID="ee9554d7-9362-440a-a8b6-f6fefcd2fe49" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:30:24 crc kubenswrapper[4675]: I1125 12:30:24.259973 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:24 crc kubenswrapper[4675]: I1125 12:30:24.263330 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-ztlr8" Nov 25 12:30:30 crc kubenswrapper[4675]: I1125 12:30:30.409630 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8vh6n" Nov 25 12:30:32 crc kubenswrapper[4675]: I1125 12:30:32.024334 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-f78nf" Nov 25 12:30:32 crc kubenswrapper[4675]: I1125 12:30:32.375010 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:32 crc kubenswrapper[4675]: I1125 12:30:32.379955 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-r2pgw" Nov 25 12:30:34 crc kubenswrapper[4675]: I1125 12:30:34.288324 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:30:43 crc kubenswrapper[4675]: I1125 12:30:43.662994 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:30:43 crc kubenswrapper[4675]: I1125 12:30:43.663619 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:30:43 crc kubenswrapper[4675]: I1125 12:30:43.875321 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.740345 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" 
event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerStarted","Data":"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.742068 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerStarted","Data":"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.744412 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerStarted","Data":"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.746851 4675 generic.go:334] "Generic (PLEG): container finished" podID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerID="48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db" exitCode=0 Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.747057 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerDied","Data":"48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.752854 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerStarted","Data":"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.756522 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerStarted","Data":"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.764362 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerStarted","Data":"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1"} Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.766597 4675 generic.go:334] "Generic (PLEG): container finished" podID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerID="b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773" exitCode=0 Nov 25 12:30:46 crc kubenswrapper[4675]: I1125 12:30:46.766637 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerDied","Data":"b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773"} Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.777365 4675 generic.go:334] "Generic (PLEG): container finished" podID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerID="71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae" exitCode=0 Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.777428 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerDied","Data":"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae"} Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 
12:30:47.779359 4675 generic.go:334] "Generic (PLEG): container finished" podID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerID="eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1" exitCode=0 Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.779436 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerDied","Data":"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1"} Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.784636 4675 generic.go:334] "Generic (PLEG): container finished" podID="1f8affce-79eb-41ed-bea6-befbdd902706" containerID="0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de" exitCode=0 Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.784715 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerDied","Data":"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de"} Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.790372 4675 generic.go:334] "Generic (PLEG): container finished" podID="9a185f42-001e-458c-a598-246cd31ac0a3" containerID="39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107" exitCode=0 Nov 25 12:30:47 crc kubenswrapper[4675]: I1125 12:30:47.791319 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerDied","Data":"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"} Nov 25 12:30:48 crc kubenswrapper[4675]: I1125 12:30:48.796940 4675 generic.go:334] "Generic (PLEG): container finished" podID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerID="447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887" exitCode=0 Nov 25 12:30:48 crc kubenswrapper[4675]: I1125 12:30:48.797025 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerDied","Data":"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887"} Nov 25 12:30:48 crc kubenswrapper[4675]: I1125 12:30:48.798748 4675 generic.go:334] "Generic (PLEG): container finished" podID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerID="01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c" exitCode=0 Nov 25 12:30:48 crc kubenswrapper[4675]: I1125 12:30:48.798785 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerDied","Data":"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c"} Nov 25 12:30:49 crc kubenswrapper[4675]: I1125 12:30:49.347954 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"] Nov 25 12:30:49 crc kubenswrapper[4675]: I1125 12:30:49.804647 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerStarted","Data":"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403"} Nov 25 12:30:49 crc kubenswrapper[4675]: I1125 12:30:49.824078 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9pcmx" 
Nov 25 12:30:52 crc kubenswrapper[4675]: I1125 12:30:52.818744 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerStarted","Data":"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af"}
Nov 25 12:30:52 crc kubenswrapper[4675]: I1125 12:30:52.835238 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z97cr" podStartSLOduration=4.622763689 podStartE2EDuration="42.835218591s" podCreationTimestamp="2025-11-25 12:30:10 +0000 UTC" firstStartedPulling="2025-11-25 12:30:13.056086803 +0000 UTC m=+158.227679144" lastFinishedPulling="2025-11-25 12:30:51.268541705 +0000 UTC m=+196.440134046" observedRunningTime="2025-11-25 12:30:52.832435379 +0000 UTC m=+198.004027760" watchObservedRunningTime="2025-11-25 12:30:52.835218591 +0000 UTC m=+198.006810932"
Nov 25 12:30:52 crc kubenswrapper[4675]: I1125 12:30:52.971307 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9pcmx"
Nov 25 12:30:52 crc kubenswrapper[4675]: I1125 12:30:52.971372 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9pcmx"
Nov 25 12:30:53 crc kubenswrapper[4675]: I1125 12:30:53.636236 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9pcmx"
Nov 25 12:30:54 crc kubenswrapper[4675]: I1125 12:30:54.835019 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerStarted","Data":"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb"}
Nov 25 12:30:54 crc kubenswrapper[4675]: I1125 12:30:54.854213 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mh8rn" podStartSLOduration=5.200964894 podStartE2EDuration="45.854197578s" podCreationTimestamp="2025-11-25 12:30:09 +0000 UTC" firstStartedPulling="2025-11-25 12:30:13.072258328 +0000 UTC m=+158.243850679" lastFinishedPulling="2025-11-25 12:30:53.725491022 +0000 UTC m=+198.897083363" observedRunningTime="2025-11-25 12:30:54.85031702 +0000 UTC m=+200.021909371" watchObservedRunningTime="2025-11-25 12:30:54.854197578 +0000 UTC m=+200.025789919"
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.853252 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerStarted","Data":"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4"}
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.855709 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerStarted","Data":"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"}
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.857800 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerStarted","Data":"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02"}
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.859245 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerStarted","Data":"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed"}
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.860445 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerStarted","Data":"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8"}
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.871073 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p265m" podStartSLOduration=5.217677129 podStartE2EDuration="48.871054181s" podCreationTimestamp="2025-11-25 12:30:09 +0000 UTC" firstStartedPulling="2025-11-25 12:30:13.086254673 +0000 UTC m=+158.257847014" lastFinishedPulling="2025-11-25 12:30:56.739631735 +0000 UTC m=+201.911224066" observedRunningTime="2025-11-25 12:30:57.870582806 +0000 UTC m=+203.042175147" watchObservedRunningTime="2025-11-25 12:30:57.871054181 +0000 UTC m=+203.042646522"
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.887184 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pfcfp" podStartSLOduration=4.273480216 podStartE2EDuration="47.887170852s" podCreationTimestamp="2025-11-25 12:30:10 +0000 UTC" firstStartedPulling="2025-11-25 12:30:13.109705805 +0000 UTC m=+158.281298146" lastFinishedPulling="2025-11-25 12:30:56.723396441 +0000 UTC m=+201.894988782" observedRunningTime="2025-11-25 12:30:57.886317315 +0000 UTC m=+203.057909656" watchObservedRunningTime="2025-11-25 12:30:57.887170852 +0000 UTC m=+203.058763183"
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.909718 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h54bl" podStartSLOduration=3.407601248 podStartE2EDuration="44.909701774s" podCreationTimestamp="2025-11-25 12:30:13 +0000 UTC" firstStartedPulling="2025-11-25 12:30:15.222210115 +0000 UTC m=+160.393802456" lastFinishedPulling="2025-11-25 12:30:56.724310641 +0000 UTC m=+201.895902982" observedRunningTime="2025-11-25 12:30:57.908857906 +0000 UTC m=+203.080450257" watchObservedRunningTime="2025-11-25 12:30:57.909701774 +0000 UTC m=+203.081294115"
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.927971 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n27dz" podStartSLOduration=4.391452856 podStartE2EDuration="46.927946946s" podCreationTimestamp="2025-11-25 12:30:11 +0000 UTC" firstStartedPulling="2025-11-25 12:30:14.176032173 +0000 UTC m=+159.347624514" lastFinishedPulling="2025-11-25 12:30:56.712526263 +0000 UTC m=+201.884118604" observedRunningTime="2025-11-25 12:30:57.926728065 +0000 UTC m=+203.098320416" watchObservedRunningTime="2025-11-25 12:30:57.927946946 +0000 UTC m=+203.099539287"
Nov 25 12:30:57 crc kubenswrapper[4675]: I1125 12:30:57.946225 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xfjg4" podStartSLOduration=4.471430815 podStartE2EDuration="45.946211187s" podCreationTimestamp="2025-11-25 12:30:12 +0000 UTC" firstStartedPulling="2025-11-25 12:30:15.260137478 +0000 UTC m=+160.431729819" lastFinishedPulling="2025-11-25 12:30:56.73491785 +0000 UTC m=+201.906510191" observedRunningTime="2025-11-25 12:30:57.944316865 +0000 UTC m=+203.115909206" watchObservedRunningTime="2025-11-25 12:30:57.946211187 +0000 UTC m=+203.117803528"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.295111 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.295157 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.337305 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.392887 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.393156 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.440297 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.777324 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.777372 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.823768 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.911120 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mh8rn"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.930160 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.930205 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:00 crc kubenswrapper[4675]: I1125 12:31:00.971535 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:01 crc kubenswrapper[4675]: I1125 12:31:01.918761 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:02 crc kubenswrapper[4675]: I1125 12:31:02.332032 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n27dz"
Nov 25 12:31:02 crc kubenswrapper[4675]: I1125 12:31:02.332436 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n27dz"
Nov 25 12:31:02 crc kubenswrapper[4675]: I1125 12:31:02.376616 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n27dz"
Nov 25 12:31:02 crc kubenswrapper[4675]: I1125 12:31:02.571196 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z97cr"]
Nov 25 12:31:02 crc kubenswrapper[4675]: I1125 12:31:02.934986 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n27dz"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.024331 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9pcmx"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.398975 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xfjg4"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.399031 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xfjg4"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.441466 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xfjg4"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.660975 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.661025 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.703530 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.886215 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z97cr" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="registry-server" containerID="cri-o://a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af" gracePeriod=2
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.928322 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xfjg4"
Nov 25 12:31:03 crc kubenswrapper[4675]: I1125 12:31:03.939664 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h54bl"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.240927 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.276638 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities\") pod \"be76bd83-5c00-41da-8c40-32bde5746c7a\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") "
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.276806 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content\") pod \"be76bd83-5c00-41da-8c40-32bde5746c7a\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") "
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.276888 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skpvl\" (UniqueName: \"kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl\") pod \"be76bd83-5c00-41da-8c40-32bde5746c7a\" (UID: \"be76bd83-5c00-41da-8c40-32bde5746c7a\") "
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.277468 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities" (OuterVolumeSpecName: "utilities") pod "be76bd83-5c00-41da-8c40-32bde5746c7a" (UID: "be76bd83-5c00-41da-8c40-32bde5746c7a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.298148 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl" (OuterVolumeSpecName: "kube-api-access-skpvl") pod "be76bd83-5c00-41da-8c40-32bde5746c7a" (UID: "be76bd83-5c00-41da-8c40-32bde5746c7a"). InnerVolumeSpecName "kube-api-access-skpvl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.349159 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be76bd83-5c00-41da-8c40-32bde5746c7a" (UID: "be76bd83-5c00-41da-8c40-32bde5746c7a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.377663 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.377723 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skpvl\" (UniqueName: \"kubernetes.io/projected/be76bd83-5c00-41da-8c40-32bde5746c7a-kube-api-access-skpvl\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.377738 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be76bd83-5c00-41da-8c40-32bde5746c7a-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.893679 4675 generic.go:334] "Generic (PLEG): container finished" podID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerID="a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af" exitCode=0
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.893777 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerDied","Data":"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af"}
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.893849 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z97cr"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.893870 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z97cr" event={"ID":"be76bd83-5c00-41da-8c40-32bde5746c7a","Type":"ContainerDied","Data":"ab3461a32f5150a07cde08e3ea60ac875103c3f955de9e173ae54f123944079f"}
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.893907 4675 scope.go:117] "RemoveContainer" containerID="a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.922954 4675 scope.go:117] "RemoveContainer" containerID="71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.925569 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z97cr"]
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.928583 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z97cr"]
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.938262 4675 scope.go:117] "RemoveContainer" containerID="f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4"
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.969296 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"]
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.969649 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9pcmx" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="registry-server" containerID="cri-o://889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403" gracePeriod=2
Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.977561 4675 scope.go:117] "RemoveContainer" containerID="a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af"
containerID="a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af" Nov 25 12:31:04 crc kubenswrapper[4675]: E1125 12:31:04.977971 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af\": container with ID starting with a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af not found: ID does not exist" containerID="a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af" Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.978001 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af"} err="failed to get container status \"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af\": rpc error: code = NotFound desc = could not find container \"a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af\": container with ID starting with a8b8555c80510bca6d86a2fbc707a3f27b32ca8fc13725a55bf16e218d82e0af not found: ID does not exist" Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.978070 4675 scope.go:117] "RemoveContainer" containerID="71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae" Nov 25 12:31:04 crc kubenswrapper[4675]: E1125 12:31:04.978428 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae\": container with ID starting with 71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae not found: ID does not exist" containerID="71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae" Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.978455 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae"} err="failed to get container status \"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae\": rpc error: code = NotFound desc = could not find container \"71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae\": container with ID starting with 71cf207f4a71fbfb5ee659cd49509365595dd8d7839cb39b1960df13fd73d5ae not found: ID does not exist" Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.978471 4675 scope.go:117] "RemoveContainer" containerID="f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4" Nov 25 12:31:04 crc kubenswrapper[4675]: E1125 12:31:04.978794 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4\": container with ID starting with f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4 not found: ID does not exist" containerID="f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4" Nov 25 12:31:04 crc kubenswrapper[4675]: I1125 12:31:04.978831 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4"} err="failed to get container status \"f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4\": rpc error: code = NotFound desc = could not find container \"f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4\": container with ID starting with 
f73e57fb8c25f782414c00ee1c9deb19a142cee1849ce85392e6c552317fdff4 not found: ID does not exist" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.322142 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.389810 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content\") pod \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.389898 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities\") pod \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.390016 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s24rn\" (UniqueName: \"kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn\") pod \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\" (UID: \"f14072ad-cddd-4f39-ada4-c97e85dd22f7\") " Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.391505 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities" (OuterVolumeSpecName: "utilities") pod "f14072ad-cddd-4f39-ada4-c97e85dd22f7" (UID: "f14072ad-cddd-4f39-ada4-c97e85dd22f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.393557 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn" (OuterVolumeSpecName: "kube-api-access-s24rn") pod "f14072ad-cddd-4f39-ada4-c97e85dd22f7" (UID: "f14072ad-cddd-4f39-ada4-c97e85dd22f7"). InnerVolumeSpecName "kube-api-access-s24rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.408623 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f14072ad-cddd-4f39-ada4-c97e85dd22f7" (UID: "f14072ad-cddd-4f39-ada4-c97e85dd22f7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.491699 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.491736 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14072ad-cddd-4f39-ada4-c97e85dd22f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.491750 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s24rn\" (UniqueName: \"kubernetes.io/projected/f14072ad-cddd-4f39-ada4-c97e85dd22f7-kube-api-access-s24rn\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.540146 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" path="/var/lib/kubelet/pods/be76bd83-5c00-41da-8c40-32bde5746c7a/volumes" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.900830 4675 generic.go:334] "Generic (PLEG): container finished" podID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerID="889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403" exitCode=0 Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.900914 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerDied","Data":"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403"} Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.901294 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9pcmx" event={"ID":"f14072ad-cddd-4f39-ada4-c97e85dd22f7","Type":"ContainerDied","Data":"a295633649da88cccff0db91ce215d5081e17641563c2734c23707519ce4fc47"} Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.900946 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9pcmx" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.901341 4675 scope.go:117] "RemoveContainer" containerID="889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.922579 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"] Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.926253 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9pcmx"] Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.928860 4675 scope.go:117] "RemoveContainer" containerID="48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.945064 4675 scope.go:117] "RemoveContainer" containerID="d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.956598 4675 scope.go:117] "RemoveContainer" containerID="889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403" Nov 25 12:31:05 crc kubenswrapper[4675]: E1125 12:31:05.957096 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403\": container with ID starting with 889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403 not found: ID does not exist" containerID="889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.957214 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403"} err="failed to get container status \"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403\": rpc error: code = NotFound desc = could not find container \"889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403\": container with ID starting with 889aa454d6621bee0bea8b719ea32257da97eea38f9b218314a3da682d900403 not found: ID does not exist" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.957300 4675 scope.go:117] "RemoveContainer" containerID="48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db" Nov 25 12:31:05 crc kubenswrapper[4675]: E1125 12:31:05.958066 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db\": container with ID starting with 48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db not found: ID does not exist" containerID="48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.958101 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db"} err="failed to get container status \"48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db\": rpc error: code = NotFound desc = could not find container \"48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db\": container with ID starting with 48cc26481779263ac439b3524ca6987d6a5890f3cefce75d11b4cdcb7c9ac2db not found: ID does not exist" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.958123 4675 scope.go:117] "RemoveContainer" 
containerID="d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395" Nov 25 12:31:05 crc kubenswrapper[4675]: E1125 12:31:05.958384 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395\": container with ID starting with d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395 not found: ID does not exist" containerID="d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395" Nov 25 12:31:05 crc kubenswrapper[4675]: I1125 12:31:05.958456 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395"} err="failed to get container status \"d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395\": rpc error: code = NotFound desc = could not find container \"d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395\": container with ID starting with d45ec656138c0358c79e12fe22b99ea1fbe1b0fef5245aced0472ac4921b4395 not found: ID does not exist" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.372633 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"] Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.373073 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h54bl" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="registry-server" containerID="cri-o://207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8" gracePeriod=2 Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.538458 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" path="/var/lib/kubelet/pods/f14072ad-cddd-4f39-ada4-c97e85dd22f7/volumes" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.715171 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h54bl" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916105 4675 generic.go:334] "Generic (PLEG): container finished" podID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerID="207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8" exitCode=0 Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916149 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerDied","Data":"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8"} Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916208 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h54bl" event={"ID":"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e","Type":"ContainerDied","Data":"7fac87bbb0f0aa7218fa5301e300016bfd894ef005ba916a43d71df1299985af"} Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916236 4675 scope.go:117] "RemoveContainer" containerID="207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916170 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h54bl" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.916974 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52qhc\" (UniqueName: \"kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc\") pod \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.917001 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities\") pod \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.917068 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content\") pod \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\" (UID: \"f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e\") " Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.918000 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities" (OuterVolumeSpecName: "utilities") pod "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" (UID: "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.928045 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc" (OuterVolumeSpecName: "kube-api-access-52qhc") pod "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" (UID: "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e"). InnerVolumeSpecName "kube-api-access-52qhc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.963086 4675 scope.go:117] "RemoveContainer" containerID="447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887" Nov 25 12:31:07 crc kubenswrapper[4675]: I1125 12:31:07.987605 4675 scope.go:117] "RemoveContainer" containerID="2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77" Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.004133 4675 scope.go:117] "RemoveContainer" containerID="207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8" Nov 25 12:31:08 crc kubenswrapper[4675]: E1125 12:31:08.004549 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8\": container with ID starting with 207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8 not found: ID does not exist" containerID="207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8" Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.004605 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8"} err="failed to get container status \"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8\": rpc error: code = NotFound desc = could not find container \"207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8\": container with ID starting with 207b84f24886d49dbb6b2eb1b2b267abb54ac5f29ade8f3be3a01924256480f8 not found: ID does not exist" Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.004639 4675 scope.go:117] "RemoveContainer" containerID="447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887" Nov 25 12:31:08 crc kubenswrapper[4675]: E1125 12:31:08.004966 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887\": container with ID starting with 447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887 not found: ID does not exist" containerID="447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887" Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.005007 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887"} err="failed to get container status \"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887\": rpc error: code = NotFound desc = could not find container \"447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887\": container with ID starting with 447e04f99fbd312d221bb8c433cf830828819db2e34e4e062599b824d77da887 not found: ID does not exist" Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.005038 4675 scope.go:117] "RemoveContainer" containerID="2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77" Nov 25 12:31:08 crc kubenswrapper[4675]: E1125 12:31:08.005377 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77\": container with ID starting with 2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77 not found: ID does not exist" containerID="2f0f9e46516e2fa6e6102086c908461064655ba707e8231fbb8662c449101a77" Nov 25 12:31:08 crc 
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.020486 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.020536 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52qhc\" (UniqueName: \"kubernetes.io/projected/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-kube-api-access-52qhc\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.037116 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" (UID: "f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.121138 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.247981 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"]
Nov 25 12:31:08 crc kubenswrapper[4675]: I1125 12:31:08.249844 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h54bl"]
Nov 25 12:31:09 crc kubenswrapper[4675]: I1125 12:31:09.538680 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" path="/var/lib/kubelet/pods/f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e/volumes"
Nov 25 12:31:10 crc kubenswrapper[4675]: I1125 12:31:10.332495 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p265m"
Nov 25 12:31:10 crc kubenswrapper[4675]: I1125 12:31:10.839284 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.572569 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"]
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.572836 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pfcfp" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="registry-server" containerID="cri-o://a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c" gracePeriod=2
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.936353 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.945036 4675 generic.go:334] "Generic (PLEG): container finished" podID="9a185f42-001e-458c-a598-246cd31ac0a3" containerID="a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c" exitCode=0
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.945087 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerDied","Data":"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"}
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.945094 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfcfp"
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.945121 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfcfp" event={"ID":"9a185f42-001e-458c-a598-246cd31ac0a3","Type":"ContainerDied","Data":"78de9d1f48ac3e8518ec84c3c64cb68f296eb98644bb95343bcd79d0d7935414"}
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.945143 4675 scope.go:117] "RemoveContainer" containerID="a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"
Nov 25 12:31:12 crc kubenswrapper[4675]: I1125 12:31:12.998033 4675 scope.go:117] "RemoveContainer" containerID="39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"
Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.026806 4675 scope.go:117] "RemoveContainer" containerID="539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26"
Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.070157 4675 scope.go:117] "RemoveContainer" containerID="a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"
Nov 25 12:31:13 crc kubenswrapper[4675]: E1125 12:31:13.070601 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c\": container with ID starting with a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c not found: ID does not exist" containerID="a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"
Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.070634 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c"} err="failed to get container status \"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c\": rpc error: code = NotFound desc = could not find container \"a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c\": container with ID starting with a14e9604bc28faa3381a28b2ec3805dcfbf3cb7e424f932f652862941112596c not found: ID does not exist"
Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.070661 4675 scope.go:117] "RemoveContainer" containerID="39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"
Nov 25 12:31:13 crc kubenswrapper[4675]: E1125 12:31:13.070926 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107\": container with ID starting with 39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107 not found: ID does not exist" containerID="39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"
containerID="39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.070945 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107"} err="failed to get container status \"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107\": rpc error: code = NotFound desc = could not find container \"39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107\": container with ID starting with 39b2fd18c2aeaee0fa520ccc687127bd1a3d5500ee053d9036a3a2566be33107 not found: ID does not exist" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.070958 4675 scope.go:117] "RemoveContainer" containerID="539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26" Nov 25 12:31:13 crc kubenswrapper[4675]: E1125 12:31:13.071157 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26\": container with ID starting with 539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26 not found: ID does not exist" containerID="539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.071171 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26"} err="failed to get container status \"539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26\": rpc error: code = NotFound desc = could not find container \"539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26\": container with ID starting with 539ccbe1deb5ce55e5d1d1169e18b03190068e0ee4a59f94f2d07e2ca424ee26 not found: ID does not exist" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.092517 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7czjd\" (UniqueName: \"kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd\") pod \"9a185f42-001e-458c-a598-246cd31ac0a3\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.092609 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities\") pod \"9a185f42-001e-458c-a598-246cd31ac0a3\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.092656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content\") pod \"9a185f42-001e-458c-a598-246cd31ac0a3\" (UID: \"9a185f42-001e-458c-a598-246cd31ac0a3\") " Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.093534 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities" (OuterVolumeSpecName: "utilities") pod "9a185f42-001e-458c-a598-246cd31ac0a3" (UID: "9a185f42-001e-458c-a598-246cd31ac0a3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.097167 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd" (OuterVolumeSpecName: "kube-api-access-7czjd") pod "9a185f42-001e-458c-a598-246cd31ac0a3" (UID: "9a185f42-001e-458c-a598-246cd31ac0a3"). InnerVolumeSpecName "kube-api-access-7czjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.133335 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a185f42-001e-458c-a598-246cd31ac0a3" (UID: "9a185f42-001e-458c-a598-246cd31ac0a3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.193876 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.193932 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7czjd\" (UniqueName: \"kubernetes.io/projected/9a185f42-001e-458c-a598-246cd31ac0a3-kube-api-access-7czjd\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.193944 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a185f42-001e-458c-a598-246cd31ac0a3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.282504 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"] Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.287573 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pfcfp"] Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.537984 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" path="/var/lib/kubelet/pods/9a185f42-001e-458c-a598-246cd31ac0a3/volumes" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.662768 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.662917 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.662996 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.666414 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.667350 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef" gracePeriod=600 Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.954923 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef" exitCode=0 Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.955385 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef"} Nov 25 12:31:13 crc kubenswrapper[4675]: I1125 12:31:13.955433 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b"} Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.374025 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" containerName="oauth-openshift" containerID="cri-o://d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374" gracePeriod=15 Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.722040 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913512 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913604 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913643 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913671 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913701 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913742 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913777 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913805 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913898 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc 
kubenswrapper[4675]: I1125 12:31:14.913936 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.913990 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.914025 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.914060 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.914101 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") pod \"db01a316-423a-4238-8a5b-9839aaac33ff\" (UID: \"db01a316-423a-4238-8a5b-9839aaac33ff\") " Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.914655 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.915324 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.915643 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.919789 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.920072 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.920497 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.920533 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.922079 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.922207 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch" (OuterVolumeSpecName: "kube-api-access-9ssch") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "kube-api-access-9ssch". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.922316 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.922793 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.922961 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.923146 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.929535 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "db01a316-423a-4238-8a5b-9839aaac33ff" (UID: "db01a316-423a-4238-8a5b-9839aaac33ff"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.962311 4675 generic.go:334] "Generic (PLEG): container finished" podID="db01a316-423a-4238-8a5b-9839aaac33ff" containerID="d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374" exitCode=0 Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.962351 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" event={"ID":"db01a316-423a-4238-8a5b-9839aaac33ff","Type":"ContainerDied","Data":"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374"} Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.962378 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" event={"ID":"db01a316-423a-4238-8a5b-9839aaac33ff","Type":"ContainerDied","Data":"68ff8422e9d83a05634e0c00185eb4d57db30891c5d8d3a113055b92f0444c71"} Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.962393 4675 scope.go:117] "RemoveContainer" containerID="d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.962583 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qb49w" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.981948 4675 scope.go:117] "RemoveContainer" containerID="d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374" Nov 25 12:31:14 crc kubenswrapper[4675]: E1125 12:31:14.982333 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374\": container with ID starting with d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374 not found: ID does not exist" containerID="d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374" Nov 25 12:31:14 crc kubenswrapper[4675]: I1125 12:31:14.982384 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374"} err="failed to get container status \"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374\": rpc error: code = NotFound desc = could not find container \"d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374\": container with ID starting with d359845c36cfa31b93b6c89e2900230f14302fc729dc9add266ecf8242ce0374 not found: ID does not exist" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.005456 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"] Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.007981 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qb49w"] Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.014878 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015087 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015157 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015330 4675 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/db01a316-423a-4238-8a5b-9839aaac33ff-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015417 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015496 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ssch\" (UniqueName: \"kubernetes.io/projected/db01a316-423a-4238-8a5b-9839aaac33ff-kube-api-access-9ssch\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015570 4675 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015650 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015724 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015808 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.015909 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.016002 4675 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.016075 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.016133 4675 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/db01a316-423a-4238-8a5b-9839aaac33ff-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:15 crc kubenswrapper[4675]: I1125 12:31:15.542350 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" path="/var/lib/kubelet/pods/db01a316-423a-4238-8a5b-9839aaac33ff/volumes" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884006 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-bb968f6ff-rbrmq"] Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884704 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884717 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884727 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884733 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 
12:31:18.884930 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884935 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884943 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79922123-d9af-46f9-9e27-22877fd78f32" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884965 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="79922123-d9af-46f9-9e27-22877fd78f32" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884972 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884978 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884985 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c198829-c4dc-459f-b29a-e705390ef9eb" containerName="collect-profiles" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.884990 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c198829-c4dc-459f-b29a-e705390ef9eb" containerName="collect-profiles" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.884997 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885004 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885012 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" containerName="oauth-openshift" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885018 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" containerName="oauth-openshift" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885025 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885032 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885040 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885047 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885056 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885062 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: 
E1125 12:31:18.885072 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885077 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885087 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885093 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="extract-utilities" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885100 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885106 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885114 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885119 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="extract-content" Nov 25 12:31:18 crc kubenswrapper[4675]: E1125 12:31:18.885128 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37c11c87-b250-414b-8c1c-faad34d76f26" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885134 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="37c11c87-b250-414b-8c1c-faad34d76f26" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885215 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="db01a316-423a-4238-8a5b-9839aaac33ff" containerName="oauth-openshift" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885236 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a185f42-001e-458c-a598-246cd31ac0a3" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885246 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f25b8a59-cb5c-47eb-a2dc-7d5c0f62ee0e" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885256 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="79922123-d9af-46f9-9e27-22877fd78f32" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885265 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c198829-c4dc-459f-b29a-e705390ef9eb" containerName="collect-profiles" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885275 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14072ad-cddd-4f39-ada4-c97e85dd22f7" containerName="registry-server" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885284 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="37c11c87-b250-414b-8c1c-faad34d76f26" containerName="pruner" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885295 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="be76bd83-5c00-41da-8c40-32bde5746c7a" containerName="registry-server" Nov 25 
12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.885667 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.887885 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.888121 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.892085 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.892201 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.894774 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.894894 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.894899 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.895651 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.895705 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.896077 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.896922 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.897739 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.902204 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.905854 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.910907 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-bb968f6ff-rbrmq"] Nov 25 12:31:18 crc kubenswrapper[4675]: I1125 12:31:18.912626 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069071 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-session\") pod 
\"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069129 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-login\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069160 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkz2f\" (UniqueName: \"kubernetes.io/projected/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-kube-api-access-mkz2f\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069194 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069224 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-dir\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069250 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069277 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-policies\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069392 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069421 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069445 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069470 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069503 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-error\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069545 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.069577 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170205 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170245 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170268 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170293 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170315 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-error\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170343 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170359 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170378 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-session\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170393 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-login\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170408 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkz2f\" (UniqueName: \"kubernetes.io/projected/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-kube-api-access-mkz2f\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170428 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170449 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-dir\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170465 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170481 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-policies\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.171728 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.171863 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-policies\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.172015 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-service-ca\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.170762 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-audit-dir\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.177856 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.178630 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.183474 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.184225 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-error\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.184517 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.184615 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-session\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.184656 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-user-template-login\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.186540 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-router-certs\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.195725 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.197172 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkz2f\" (UniqueName: \"kubernetes.io/projected/799c1f84-7c99-4cf6-94b8-b2722c6d19cb-kube-api-access-mkz2f\") pod \"oauth-openshift-bb968f6ff-rbrmq\" (UID: \"799c1f84-7c99-4cf6-94b8-b2722c6d19cb\") " pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.205011 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.655306 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-bb968f6ff-rbrmq"] Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.989200 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" event={"ID":"799c1f84-7c99-4cf6-94b8-b2722c6d19cb","Type":"ContainerStarted","Data":"35a81744a5959566e1b97cb06bf8158222908f39d1ad78c67e27354c5760aa92"} Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.989243 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" event={"ID":"799c1f84-7c99-4cf6-94b8-b2722c6d19cb","Type":"ContainerStarted","Data":"faab6e5b85cd41bdd97e7a04c6c41910e9b40b68611ff898b41a2d870a3ce267"} Nov 25 12:31:19 crc kubenswrapper[4675]: I1125 12:31:19.992013 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:20 crc kubenswrapper[4675]: I1125 12:31:20.013298 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" podStartSLOduration=31.0132802 podStartE2EDuration="31.0132802s" podCreationTimestamp="2025-11-25 12:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:31:20.01178058 +0000 UTC m=+225.183372921" watchObservedRunningTime="2025-11-25 12:31:20.0132802 +0000 UTC m=+225.184872541" Nov 25 12:31:20 crc kubenswrapper[4675]: I1125 12:31:20.260043 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-bb968f6ff-rbrmq" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.503574 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.510371 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mh8rn" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="registry-server" containerID="cri-o://82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb" gracePeriod=30 Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.539362 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p265m"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.540037 4675 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-p265m" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="registry-server" containerID="cri-o://de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4" gracePeriod=30 Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.554876 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.555085 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator" containerID="cri-o://e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4" gracePeriod=30 Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.568873 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gft6s"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.570200 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.581098 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.581372 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n27dz" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="registry-server" containerID="cri-o://286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed" gracePeriod=30 Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.586677 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.587154 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xfjg4" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="registry-server" containerID="cri-o://693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02" gracePeriod=30 Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.595915 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gft6s"] Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.722223 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftddk\" (UniqueName: \"kubernetes.io/projected/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-kube-api-access-ftddk\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.722291 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.722338 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" 
(UniqueName: \"kubernetes.io/secret/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.823305 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftddk\" (UniqueName: \"kubernetes.io/projected/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-kube-api-access-ftddk\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.823356 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.823428 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.824624 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.849206 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.855735 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftddk\" (UniqueName: \"kubernetes.io/projected/b0a290f6-aa83-4c86-80ba-5f48e9a78c36-kube-api-access-ftddk\") pod \"marketplace-operator-79b997595-gft6s\" (UID: \"b0a290f6-aa83-4c86-80ba-5f48e9a78c36\") " pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.896023 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" Nov 25 12:31:54 crc kubenswrapper[4675]: I1125 12:31:54.985125 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mh8rn" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.075920 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.099622 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.120550 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.127731 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p265m" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.134406 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxk82\" (UniqueName: \"kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82\") pod \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.134451 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content\") pod \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.134485 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities\") pod \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\" (UID: \"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.140367 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82" (OuterVolumeSpecName: "kube-api-access-vxk82") pod "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" (UID: "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0"). InnerVolumeSpecName "kube-api-access-vxk82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.140858 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities" (OuterVolumeSpecName: "utilities") pod "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" (UID: "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.203858 4675 generic.go:334] "Generic (PLEG): container finished" podID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerID="693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02" exitCode=0 Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.203923 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerDied","Data":"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.203954 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xfjg4" event={"ID":"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef","Type":"ContainerDied","Data":"5d0d32ddabc6c41def5331083b2efaed6be7d47ce896721aad7521d111dd6063"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.204015 4675 scope.go:117] "RemoveContainer" containerID="693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.204153 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xfjg4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.209788 4675 generic.go:334] "Generic (PLEG): container finished" podID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerID="82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb" exitCode=0 Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.209871 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerDied","Data":"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.209902 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mh8rn" event={"ID":"b14b88f1-8208-4dcb-87e1-ab5ebaca22f0","Type":"ContainerDied","Data":"ce79865f8be9517619c6e951afc06478bdbacde5bd8772182ff560af9d094872"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.209976 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mh8rn" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.216715 4675 generic.go:334] "Generic (PLEG): container finished" podID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerID="286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed" exitCode=0 Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.216755 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n27dz" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.216772 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerDied","Data":"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.218043 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n27dz" event={"ID":"c7371aea-9b52-464d-9f04-7cf5406580cb","Type":"ContainerDied","Data":"8bd28f06bee488f32bf464d4ce24c41cebfc1b450a53a6d79a1495e55c6d902a"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.225572 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" (UID: "b14b88f1-8208-4dcb-87e1-ab5ebaca22f0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.225940 4675 generic.go:334] "Generic (PLEG): container finished" podID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerID="e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4" exitCode=0 Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.225987 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.226016 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" event={"ID":"412d82e0-9b92-45f4-8030-8f91fffe3e9a","Type":"ContainerDied","Data":"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.226052 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9hj2h" event={"ID":"412d82e0-9b92-45f4-8030-8f91fffe3e9a","Type":"ContainerDied","Data":"3d0838cc3e9afd068798d4733b64ac2c557595cfb7c320a980a4c481d0c7a836"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.230686 4675 generic.go:334] "Generic (PLEG): container finished" podID="1f8affce-79eb-41ed-bea6-befbdd902706" containerID="de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4" exitCode=0 Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.230730 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerDied","Data":"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.230773 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p265m" event={"ID":"1f8affce-79eb-41ed-bea6-befbdd902706","Type":"ContainerDied","Data":"3b1aba78f8f891a560ef2bd6dc0e71a6f855c87be697e46854cf1832fdc599b6"} Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.230778 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p265m" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.235141 4675 scope.go:117] "RemoveContainer" containerID="01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237371 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6495s\" (UniqueName: \"kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s\") pod \"1f8affce-79eb-41ed-bea6-befbdd902706\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237430 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content\") pod \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237486 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlvcg\" (UniqueName: \"kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg\") pod \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237510 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content\") pod \"1f8affce-79eb-41ed-bea6-befbdd902706\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237533 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkg6n\" (UniqueName: \"kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n\") pod \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237560 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities\") pod \"1f8affce-79eb-41ed-bea6-befbdd902706\" (UID: \"1f8affce-79eb-41ed-bea6-befbdd902706\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237585 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content\") pod \"c7371aea-9b52-464d-9f04-7cf5406580cb\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237621 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca\") pod \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237668 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities\") pod \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\" (UID: \"40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237696 
4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics\") pod \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\" (UID: \"412d82e0-9b92-45f4-8030-8f91fffe3e9a\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237740 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg7tr\" (UniqueName: \"kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr\") pod \"c7371aea-9b52-464d-9f04-7cf5406580cb\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.237769 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities\") pod \"c7371aea-9b52-464d-9f04-7cf5406580cb\" (UID: \"c7371aea-9b52-464d-9f04-7cf5406580cb\") " Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.238061 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxk82\" (UniqueName: \"kubernetes.io/projected/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-kube-api-access-vxk82\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.238088 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.238103 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.238645 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities" (OuterVolumeSpecName: "utilities") pod "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" (UID: "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.238899 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "412d82e0-9b92-45f4-8030-8f91fffe3e9a" (UID: "412d82e0-9b92-45f4-8030-8f91fffe3e9a"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.241556 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr" (OuterVolumeSpecName: "kube-api-access-qg7tr") pod "c7371aea-9b52-464d-9f04-7cf5406580cb" (UID: "c7371aea-9b52-464d-9f04-7cf5406580cb"). InnerVolumeSpecName "kube-api-access-qg7tr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.241583 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "412d82e0-9b92-45f4-8030-8f91fffe3e9a" (UID: "412d82e0-9b92-45f4-8030-8f91fffe3e9a"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.241913 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities" (OuterVolumeSpecName: "utilities") pod "1f8affce-79eb-41ed-bea6-befbdd902706" (UID: "1f8affce-79eb-41ed-bea6-befbdd902706"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.248364 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n" (OuterVolumeSpecName: "kube-api-access-vkg6n") pod "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" (UID: "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef"). InnerVolumeSpecName "kube-api-access-vkg6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.254647 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities" (OuterVolumeSpecName: "utilities") pod "c7371aea-9b52-464d-9f04-7cf5406580cb" (UID: "c7371aea-9b52-464d-9f04-7cf5406580cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.256030 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg" (OuterVolumeSpecName: "kube-api-access-qlvcg") pod "412d82e0-9b92-45f4-8030-8f91fffe3e9a" (UID: "412d82e0-9b92-45f4-8030-8f91fffe3e9a"). InnerVolumeSpecName "kube-api-access-qlvcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.256729 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s" (OuterVolumeSpecName: "kube-api-access-6495s") pod "1f8affce-79eb-41ed-bea6-befbdd902706" (UID: "1f8affce-79eb-41ed-bea6-befbdd902706"). InnerVolumeSpecName "kube-api-access-6495s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.261375 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7371aea-9b52-464d-9f04-7cf5406580cb" (UID: "c7371aea-9b52-464d-9f04-7cf5406580cb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.265171 4675 scope.go:117] "RemoveContainer" containerID="564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.293359 4675 scope.go:117] "RemoveContainer" containerID="693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.294844 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02\": container with ID starting with 693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02 not found: ID does not exist" containerID="693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.294888 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02"} err="failed to get container status \"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02\": rpc error: code = NotFound desc = could not find container \"693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02\": container with ID starting with 693cbb349014538d10d22ece2ef285622ebb96fd583bb821a471f444a12f8b02 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.294919 4675 scope.go:117] "RemoveContainer" containerID="01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.295883 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c\": container with ID starting with 01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c not found: ID does not exist" containerID="01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.295922 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c"} err="failed to get container status \"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c\": rpc error: code = NotFound desc = could not find container \"01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c\": container with ID starting with 01a38865b34db6596def90e840baab36617e47c9e4dc67cff5b958737b38cb5c not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.295948 4675 scope.go:117] "RemoveContainer" containerID="564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.296428 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217\": container with ID starting with 564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217 not found: ID does not exist" containerID="564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.296460 4675 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217"} err="failed to get container status \"564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217\": rpc error: code = NotFound desc = could not find container \"564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217\": container with ID starting with 564a7d5824d2d38b9c83b7bca25f0a62761409fda16ae61f8c3924acd943e217 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.296480 4675 scope.go:117] "RemoveContainer" containerID="82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.314357 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f8affce-79eb-41ed-bea6-befbdd902706" (UID: "1f8affce-79eb-41ed-bea6-befbdd902706"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.333936 4675 scope.go:117] "RemoveContainer" containerID="eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349161 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349196 4675 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349210 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg7tr\" (UniqueName: \"kubernetes.io/projected/c7371aea-9b52-464d-9f04-7cf5406580cb-kube-api-access-qg7tr\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349221 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349233 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6495s\" (UniqueName: \"kubernetes.io/projected/1f8affce-79eb-41ed-bea6-befbdd902706-kube-api-access-6495s\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349245 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlvcg\" (UniqueName: \"kubernetes.io/projected/412d82e0-9b92-45f4-8030-8f91fffe3e9a-kube-api-access-qlvcg\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349256 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349268 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkg6n\" (UniqueName: \"kubernetes.io/projected/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-kube-api-access-vkg6n\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349279 4675 
reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8affce-79eb-41ed-bea6-befbdd902706-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349290 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7371aea-9b52-464d-9f04-7cf5406580cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.349301 4675 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/412d82e0-9b92-45f4-8030-8f91fffe3e9a-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.359570 4675 scope.go:117] "RemoveContainer" containerID="09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.360394 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" (UID: "40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.372329 4675 scope.go:117] "RemoveContainer" containerID="82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.378803 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb\": container with ID starting with 82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb not found: ID does not exist" containerID="82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.378899 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb"} err="failed to get container status \"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb\": rpc error: code = NotFound desc = could not find container \"82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb\": container with ID starting with 82ee40476a33d77aa693b21af5f6028079847d8260649ee0cbaaf3ff913d94fb not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.378968 4675 scope.go:117] "RemoveContainer" containerID="eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.379691 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1\": container with ID starting with eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1 not found: ID does not exist" containerID="eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.379748 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1"} err="failed to get container status 
\"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1\": rpc error: code = NotFound desc = could not find container \"eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1\": container with ID starting with eed257043f4b0b5d1d76990f35e3199bbc84c38e2759088483afe0ca48fd10f1 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.379780 4675 scope.go:117] "RemoveContainer" containerID="09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.380259 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca\": container with ID starting with 09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca not found: ID does not exist" containerID="09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.380295 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca"} err="failed to get container status \"09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca\": rpc error: code = NotFound desc = could not find container \"09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca\": container with ID starting with 09f0a12021245c7bc6b6b5496e271c8cf163f4b7169365ccad5592bb6b43cdca not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.380315 4675 scope.go:117] "RemoveContainer" containerID="286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.393438 4675 scope.go:117] "RemoveContainer" containerID="b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.406317 4675 scope.go:117] "RemoveContainer" containerID="008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.418786 4675 scope.go:117] "RemoveContainer" containerID="286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.419720 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed\": container with ID starting with 286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed not found: ID does not exist" containerID="286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.419752 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed"} err="failed to get container status \"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed\": rpc error: code = NotFound desc = could not find container \"286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed\": container with ID starting with 286c9c30e457ba700b9587dbba166e7cfa9c75c6b0d2ac08ae4491abd9c506ed not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.419775 4675 scope.go:117] "RemoveContainer" containerID="b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773" Nov 25 12:31:55 crc 
kubenswrapper[4675]: E1125 12:31:55.420390 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773\": container with ID starting with b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773 not found: ID does not exist" containerID="b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.420408 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773"} err="failed to get container status \"b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773\": rpc error: code = NotFound desc = could not find container \"b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773\": container with ID starting with b60827cac634ef6e1c8ef9fa0ee7ad9a1d7ba71d916a83f78ec91f0173ca4773 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.420422 4675 scope.go:117] "RemoveContainer" containerID="008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.420940 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348\": container with ID starting with 008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348 not found: ID does not exist" containerID="008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.420963 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348"} err="failed to get container status \"008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348\": rpc error: code = NotFound desc = could not find container \"008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348\": container with ID starting with 008435b6a96e5a352550c52e701515346337e3eaf5165cfed64229b06bcd4348 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.420979 4675 scope.go:117] "RemoveContainer" containerID="e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.435789 4675 scope.go:117] "RemoveContainer" containerID="e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.436219 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4\": container with ID starting with e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4 not found: ID does not exist" containerID="e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.436243 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4"} err="failed to get container status \"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4\": rpc error: code = NotFound desc = could not find container 
\"e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4\": container with ID starting with e4e7c2861534b9d1e8206e94407536334e112e96a966dae4071176554c8020a4 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.436262 4675 scope.go:117] "RemoveContainer" containerID="de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.454378 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.457429 4675 scope.go:117] "RemoveContainer" containerID="0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.484343 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gft6s"] Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.492346 4675 scope.go:117] "RemoveContainer" containerID="9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.509104 4675 scope.go:117] "RemoveContainer" containerID="de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.510326 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4\": container with ID starting with de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4 not found: ID does not exist" containerID="de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.510372 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4"} err="failed to get container status \"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4\": rpc error: code = NotFound desc = could not find container \"de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4\": container with ID starting with de9a034c8f684d84e17ef048e9132598553f7f69f46639b2d762fec5b3657af4 not found: ID does not exist" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.510398 4675 scope.go:117] "RemoveContainer" containerID="0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de" Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.511638 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de\": container with ID starting with 0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de not found: ID does not exist" containerID="0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de" Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.511668 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de"} err="failed to get container status \"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de\": rpc error: code = NotFound desc = could not find container \"0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de\": container with ID 
starting with 0a07615fb367244121e2f5858a84cf5f879c5cbc4d4aa29c17646a9a773ac6de not found: ID does not exist"
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.511707 4675 scope.go:117] "RemoveContainer" containerID="9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5"
Nov 25 12:31:55 crc kubenswrapper[4675]: E1125 12:31:55.511962 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5\": container with ID starting with 9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5 not found: ID does not exist" containerID="9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5"
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.512007 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5"} err="failed to get container status \"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5\": rpc error: code = NotFound desc = could not find container \"9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5\": container with ID starting with 9b72f2052d825b19630681ad2f7cef36cd898261903bafdd178c62160225ddd5 not found: ID does not exist"
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.574947 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.575015 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xfjg4"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.575883 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.580043 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mh8rn"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.596155 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.598152 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n27dz"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.608032 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.609362 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9hj2h"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.617033 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p265m"]
Nov 25 12:31:55 crc kubenswrapper[4675]: I1125 12:31:55.622856 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p265m"]
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.238445 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" event={"ID":"b0a290f6-aa83-4c86-80ba-5f48e9a78c36","Type":"ContainerStarted","Data":"cd1f4eec97de3ce1af15bb9dbccf08307fbb14d23c2d18e7d9c36734d55bf544"}
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.238726 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" event={"ID":"b0a290f6-aa83-4c86-80ba-5f48e9a78c36","Type":"ContainerStarted","Data":"dd60c94d097ef118b0aba46605f4531621b04c7497186d46265da9318c12e95e"}
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.239057 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.241541 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.278347 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-gft6s" podStartSLOduration=2.278331767 podStartE2EDuration="2.278331767s" podCreationTimestamp="2025-11-25 12:31:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:31:56.262236132 +0000 UTC m=+261.433828473" watchObservedRunningTime="2025-11-25 12:31:56.278331767 +0000 UTC m=+261.449924108"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322349 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jhj2z"]
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322558 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322577 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322588 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322598 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322608 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322616 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322624 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322633 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322641 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322646 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322658 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322664 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322672 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322677 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322684 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322689 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322696 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322702 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322712 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322719 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="extract-content"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322728 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322734 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="extract-utilities"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322743 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322748 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator"
Nov 25 12:31:56 crc kubenswrapper[4675]: E1125 12:31:56.322756 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322763 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322863 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" containerName="marketplace-operator"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322873 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322881 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322887 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.322894 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" containerName="registry-server"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.323540 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.325873 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.335048 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jhj2z"]
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.370459 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzgqm\" (UniqueName: \"kubernetes.io/projected/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-kube-api-access-hzgqm\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.370524 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-utilities\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.370558 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-catalog-content\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.471627 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-catalog-content\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.471746 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzgqm\" (UniqueName: \"kubernetes.io/projected/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-kube-api-access-hzgqm\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.471793 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-utilities\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.472230 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-utilities\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.472471 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-catalog-content\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.489465 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzgqm\" (UniqueName: \"kubernetes.io/projected/4d40a555-6ac6-4f2b-aab9-d9586a0607fc-kube-api-access-hzgqm\") pod \"certified-operators-jhj2z\" (UID: \"4d40a555-6ac6-4f2b-aab9-d9586a0607fc\") " pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:56 crc kubenswrapper[4675]: I1125 12:31:56.641432 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.036181 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jhj2z"]
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.246587 4675 generic.go:334] "Generic (PLEG): container finished" podID="4d40a555-6ac6-4f2b-aab9-d9586a0607fc" containerID="667465cb58c06b3649c3eeaf36ea671cc73997f88a56f8499e238636586f1209" exitCode=0
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.246658 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jhj2z" event={"ID":"4d40a555-6ac6-4f2b-aab9-d9586a0607fc","Type":"ContainerDied","Data":"667465cb58c06b3649c3eeaf36ea671cc73997f88a56f8499e238636586f1209"}
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.247113 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jhj2z" event={"ID":"4d40a555-6ac6-4f2b-aab9-d9586a0607fc","Type":"ContainerStarted","Data":"8e954d4ee06fbcfceee062252054571485f88361dd82b3baac099cf282bd2bee"}
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.324889 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.326072 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.329388 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.332161 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.383664 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.384166 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.384281 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l88nv\" (UniqueName: \"kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.485996 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.486343 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l88nv\" (UniqueName: \"kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.486474 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.486651 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.488566 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.531585 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l88nv\" (UniqueName: \"kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv\") pod \"redhat-marketplace-2p8qm\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") " pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.539760 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f8affce-79eb-41ed-bea6-befbdd902706" path="/var/lib/kubelet/pods/1f8affce-79eb-41ed-bea6-befbdd902706/volumes"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.541648 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef" path="/var/lib/kubelet/pods/40a08e2f-2eb9-4d4c-8e9c-c8cfe100e5ef/volumes"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.542939 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="412d82e0-9b92-45f4-8030-8f91fffe3e9a" path="/var/lib/kubelet/pods/412d82e0-9b92-45f4-8030-8f91fffe3e9a/volumes"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.544463 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b14b88f1-8208-4dcb-87e1-ab5ebaca22f0" path="/var/lib/kubelet/pods/b14b88f1-8208-4dcb-87e1-ab5ebaca22f0/volumes"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.545518 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7371aea-9b52-464d-9f04-7cf5406580cb" path="/var/lib/kubelet/pods/c7371aea-9b52-464d-9f04-7cf5406580cb/volumes"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.648320 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:31:57 crc kubenswrapper[4675]: I1125 12:31:57.912600 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.252677 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jhj2z" event={"ID":"4d40a555-6ac6-4f2b-aab9-d9586a0607fc","Type":"ContainerStarted","Data":"a43d8c7d255474ba660aed22b26b9f2d8af26d63de3fd6e59968ca18ce1057ba"}
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.254892 4675 generic.go:334] "Generic (PLEG): container finished" podID="0db468b2-8e51-4126-a30e-e8f562240e79" containerID="6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35" exitCode=0
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.254975 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerDied","Data":"6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35"}
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.255017 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerStarted","Data":"c5ef3dd6ea47861f3d0707b32d2a2cffcd67d4630b63a08dc9a6b343e383ebd7"}
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.723675 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"]
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.729531 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.730770 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"]
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.732803 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.804438 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.804479 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.804531 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t6hz\" (UniqueName: \"kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.906156 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.906210 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.906289 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t6hz\" (UniqueName: \"kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.906835 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.907169 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:58 crc kubenswrapper[4675]: I1125 12:31:58.927652 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t6hz\" (UniqueName: \"kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz\") pod \"redhat-operators-kxkld\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.047741 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.264375 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerStarted","Data":"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"}
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.269002 4675 generic.go:334] "Generic (PLEG): container finished" podID="4d40a555-6ac6-4f2b-aab9-d9586a0607fc" containerID="a43d8c7d255474ba660aed22b26b9f2d8af26d63de3fd6e59968ca18ce1057ba" exitCode=0
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.269056 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jhj2z" event={"ID":"4d40a555-6ac6-4f2b-aab9-d9586a0607fc","Type":"ContainerDied","Data":"a43d8c7d255474ba660aed22b26b9f2d8af26d63de3fd6e59968ca18ce1057ba"}
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.432486 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"]
Nov 25 12:31:59 crc kubenswrapper[4675]: W1125 12:31:59.441437 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4d43246_7b39_42d1_8b5d_7255662aaf2d.slice/crio-61c27810b131b6393bcad274f7398ce9394d95c3c539672968e19b5795def561 WatchSource:0}: Error finding container 61c27810b131b6393bcad274f7398ce9394d95c3c539672968e19b5795def561: Status 404 returned error can't find the container with id 61c27810b131b6393bcad274f7398ce9394d95c3c539672968e19b5795def561
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.722113 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qll4m"]
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.723262 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.724653 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.738157 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qll4m"]
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.817702 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-utilities\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.818116 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq9jn\" (UniqueName: \"kubernetes.io/projected/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-kube-api-access-xq9jn\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.818178 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-catalog-content\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.919036 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-utilities\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.919290 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq9jn\" (UniqueName: \"kubernetes.io/projected/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-kube-api-access-xq9jn\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.919414 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-catalog-content\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.919579 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-utilities\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.919788 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-catalog-content\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:31:59 crc kubenswrapper[4675]: I1125 12:31:59.940795 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq9jn\" (UniqueName: \"kubernetes.io/projected/fe96f0fa-cfaf-4889-9219-3626cb45d0e0-kube-api-access-xq9jn\") pod \"community-operators-qll4m\" (UID: \"fe96f0fa-cfaf-4889-9219-3626cb45d0e0\") " pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.062806 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.279142 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerID="ad9ab0324601b168fc9f9f7f9116784539cd8b12aa1839a9e7d6249f36c526f3" exitCode=0
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.279343 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerDied","Data":"ad9ab0324601b168fc9f9f7f9116784539cd8b12aa1839a9e7d6249f36c526f3"}
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.279458 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerStarted","Data":"61c27810b131b6393bcad274f7398ce9394d95c3c539672968e19b5795def561"}
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.281917 4675 generic.go:334] "Generic (PLEG): container finished" podID="0db468b2-8e51-4126-a30e-e8f562240e79" containerID="b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a" exitCode=0
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.282045 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerDied","Data":"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"}
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.290081 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jhj2z" event={"ID":"4d40a555-6ac6-4f2b-aab9-d9586a0607fc","Type":"ContainerStarted","Data":"b86b4e9b74bde9c9b55a898046f64591cc4b9377965af780396b19a88c02eb3e"}
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.320509 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jhj2z" podStartSLOduration=1.892390211 podStartE2EDuration="4.320490054s" podCreationTimestamp="2025-11-25 12:31:56 +0000 UTC" firstStartedPulling="2025-11-25 12:31:57.248750431 +0000 UTC m=+262.420342772" lastFinishedPulling="2025-11-25 12:31:59.676850274 +0000 UTC m=+264.848442615" observedRunningTime="2025-11-25 12:32:00.313390345 +0000 UTC m=+265.484982696" watchObservedRunningTime="2025-11-25 12:32:00.320490054 +0000 UTC m=+265.492082415"
Nov 25 12:32:00 crc kubenswrapper[4675]: I1125 12:32:00.467011 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qll4m"]
Nov 25 12:32:00 crc kubenswrapper[4675]: W1125 12:32:00.474493 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe96f0fa_cfaf_4889_9219_3626cb45d0e0.slice/crio-58c1954935cfb222fa8faf176af6348944405593442f419e7c39a8ca4adc16fe WatchSource:0}: Error finding container 58c1954935cfb222fa8faf176af6348944405593442f419e7c39a8ca4adc16fe: Status 404 returned error can't find the container with id 58c1954935cfb222fa8faf176af6348944405593442f419e7c39a8ca4adc16fe
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.296208 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerStarted","Data":"f87aef11c3eb1cd75c9554b55525e35164afadd2411c61657b3790cd36bf9b73"}
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.299087 4675 generic.go:334] "Generic (PLEG): container finished" podID="fe96f0fa-cfaf-4889-9219-3626cb45d0e0" containerID="af424354705a5aa672d4c4ce409a42122b1da1be9217b6f8828d78b2b3eb7e48" exitCode=0
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.299417 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qll4m" event={"ID":"fe96f0fa-cfaf-4889-9219-3626cb45d0e0","Type":"ContainerDied","Data":"af424354705a5aa672d4c4ce409a42122b1da1be9217b6f8828d78b2b3eb7e48"}
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.299452 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qll4m" event={"ID":"fe96f0fa-cfaf-4889-9219-3626cb45d0e0","Type":"ContainerStarted","Data":"58c1954935cfb222fa8faf176af6348944405593442f419e7c39a8ca4adc16fe"}
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.302413 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerStarted","Data":"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"}
Nov 25 12:32:01 crc kubenswrapper[4675]: I1125 12:32:01.340337 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2p8qm" podStartSLOduration=1.5886249270000001 podStartE2EDuration="4.340317142s" podCreationTimestamp="2025-11-25 12:31:57 +0000 UTC" firstStartedPulling="2025-11-25 12:31:58.256174284 +0000 UTC m=+263.427766625" lastFinishedPulling="2025-11-25 12:32:01.007866499 +0000 UTC m=+266.179458840" observedRunningTime="2025-11-25 12:32:01.338180667 +0000 UTC m=+266.509773028" watchObservedRunningTime="2025-11-25 12:32:01.340317142 +0000 UTC m=+266.511909493"
Nov 25 12:32:02 crc kubenswrapper[4675]: I1125 12:32:02.308121 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerID="f87aef11c3eb1cd75c9554b55525e35164afadd2411c61657b3790cd36bf9b73" exitCode=0
Nov 25 12:32:02 crc kubenswrapper[4675]: I1125 12:32:02.308279 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerDied","Data":"f87aef11c3eb1cd75c9554b55525e35164afadd2411c61657b3790cd36bf9b73"}
Nov 25 12:32:02 crc kubenswrapper[4675]: I1125 12:32:02.311273 4675 generic.go:334] "Generic (PLEG): container finished" podID="fe96f0fa-cfaf-4889-9219-3626cb45d0e0" containerID="0c6f4897c08650a4f08a71850351105d152ebf6b673f16d68c7148bd57ef5a56" exitCode=0
Nov 25 12:32:02 crc kubenswrapper[4675]: I1125 12:32:02.311861 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qll4m" event={"ID":"fe96f0fa-cfaf-4889-9219-3626cb45d0e0","Type":"ContainerDied","Data":"0c6f4897c08650a4f08a71850351105d152ebf6b673f16d68c7148bd57ef5a56"}
Nov 25 12:32:03 crc kubenswrapper[4675]: I1125 12:32:03.317843 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerStarted","Data":"daed327306aea14da9eb60498e026160f1ce069d1bdde8ab6f8c9e17441e510d"}
Nov 25 12:32:03 crc kubenswrapper[4675]: I1125 12:32:03.320349 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qll4m" event={"ID":"fe96f0fa-cfaf-4889-9219-3626cb45d0e0","Type":"ContainerStarted","Data":"066b81f20909c84f295007a64aca216828104aa36e677949dfd69f663a9c7bf8"}
Nov 25 12:32:03 crc kubenswrapper[4675]: I1125 12:32:03.343474 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kxkld" podStartSLOduration=2.538082947 podStartE2EDuration="5.343458775s" podCreationTimestamp="2025-11-25 12:31:58 +0000 UTC" firstStartedPulling="2025-11-25 12:32:00.281181195 +0000 UTC m=+265.452773536" lastFinishedPulling="2025-11-25 12:32:03.086557023 +0000 UTC m=+268.258149364" observedRunningTime="2025-11-25 12:32:03.341548979 +0000 UTC m=+268.513141350" watchObservedRunningTime="2025-11-25 12:32:03.343458775 +0000 UTC m=+268.515051116"
Nov 25 12:32:03 crc kubenswrapper[4675]: I1125 12:32:03.367569 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qll4m" podStartSLOduration=2.737037609 podStartE2EDuration="4.367545651s" podCreationTimestamp="2025-11-25 12:31:59 +0000 UTC" firstStartedPulling="2025-11-25 12:32:01.300996142 +0000 UTC m=+266.472588483" lastFinishedPulling="2025-11-25 12:32:02.931504184 +0000 UTC m=+268.103096525" observedRunningTime="2025-11-25 12:32:03.362220824 +0000 UTC m=+268.533813175" watchObservedRunningTime="2025-11-25 12:32:03.367545651 +0000 UTC m=+268.539137992"
Nov 25 12:32:06 crc kubenswrapper[4675]: I1125 12:32:06.641608 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:32:06 crc kubenswrapper[4675]: I1125 12:32:06.642195 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:32:06 crc kubenswrapper[4675]: I1125 12:32:06.691436 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:32:07 crc kubenswrapper[4675]: I1125 12:32:07.378648 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jhj2z"
Nov 25 12:32:07 crc kubenswrapper[4675]: I1125 12:32:07.649502 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:32:07 crc kubenswrapper[4675]: I1125 12:32:07.650581 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:32:07 crc kubenswrapper[4675]: I1125 12:32:07.702636 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:32:08 crc kubenswrapper[4675]: I1125 12:32:08.381118 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 12:32:09 crc kubenswrapper[4675]: I1125 12:32:09.048230 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:32:09 crc kubenswrapper[4675]: I1125 12:32:09.048286 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:32:09 crc kubenswrapper[4675]: I1125 12:32:09.089039 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:32:09 crc kubenswrapper[4675]: I1125 12:32:09.389439 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kxkld"
Nov 25 12:32:10 crc kubenswrapper[4675]: I1125 12:32:10.063988 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:32:10 crc kubenswrapper[4675]: I1125 12:32:10.064888 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:32:10 crc kubenswrapper[4675]: I1125 12:32:10.115201 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:32:10 crc kubenswrapper[4675]: I1125 12:32:10.394198 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qll4m"
Nov 25 12:33:13 crc kubenswrapper[4675]: I1125 12:33:13.662326 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:33:13 crc kubenswrapper[4675]: I1125 12:33:13.663175 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:33:43 crc kubenswrapper[4675]: I1125 12:33:43.662755 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:33:43 crc kubenswrapper[4675]: I1125 12:33:43.663439 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:34:13 crc kubenswrapper[4675]: I1125 12:34:13.663107 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:34:13 crc kubenswrapper[4675]: I1125 12:34:13.663739 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:34:13 crc kubenswrapper[4675]: I1125 12:34:13.663780 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r"
Nov 25 12:34:13 crc kubenswrapper[4675]: I1125 12:34:13.664303 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 12:34:13 crc kubenswrapper[4675]: I1125 12:34:13.664361 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b" gracePeriod=600
Nov 25 12:34:14 crc kubenswrapper[4675]: I1125 12:34:14.037431 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b" exitCode=0
Nov 25 12:34:14 crc kubenswrapper[4675]: I1125 12:34:14.037496 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b"}
Nov 25 12:34:14 crc kubenswrapper[4675]: I1125 12:34:14.038041 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b"}
Nov 25 12:34:14 crc kubenswrapper[4675]: I1125 12:34:14.038105 4675 scope.go:117] "RemoveContainer" containerID="82e4d2f621ccbcc0f84f3434acf9cb01262a8acb2de51aa38dba64ad6e3108ef"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.224586 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rlsqk"]
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.225960 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.235902 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rlsqk"]
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391309 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391375 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-bound-sa-token\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391412 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391443 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-certificates\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391465 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391494 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-trusted-ca\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391525 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-tls\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.391553 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdb62\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-kube-api-access-bdb62\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.416699 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-bound-sa-token\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492616 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-certificates\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492646 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492687 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-trusted-ca\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492719 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-tls\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492749 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdb62\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-kube-api-access-bdb62\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.492796 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.493296 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.494045 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-certificates\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.494218 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-trusted-ca\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.499009 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-registry-tls\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.503151 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.508681 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-bound-sa-token\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.511281 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdb62\" (UniqueName: \"kubernetes.io/projected/d29c2f9c-2c9c-47b0-ba24-95c6f60162ea-kube-api-access-bdb62\") pod \"image-registry-66df7c8f76-rlsqk\" (UID: \"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea\") " pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.548346 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:28 crc kubenswrapper[4675]: I1125 12:34:28.745045 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rlsqk"]
Nov 25 12:34:28 crc kubenswrapper[4675]: W1125 12:34:28.754405 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd29c2f9c_2c9c_47b0_ba24_95c6f60162ea.slice/crio-5d7576319c05e65944bd167cb8129005de8fa80b34678e36f2cde51e0e1c93b7 WatchSource:0}: Error finding container 5d7576319c05e65944bd167cb8129005de8fa80b34678e36f2cde51e0e1c93b7: Status 404 returned error can't find the container with id 5d7576319c05e65944bd167cb8129005de8fa80b34678e36f2cde51e0e1c93b7
Nov 25 12:34:29 crc kubenswrapper[4675]: I1125 12:34:29.119425 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk" event={"ID":"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea","Type":"ContainerStarted","Data":"148bbfab053bb949a96eeeffe17ae929cf4b5176b1d2e6f402820e8abb5a34bc"}
Nov 25 12:34:29 crc kubenswrapper[4675]: I1125 12:34:29.119474 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk" event={"ID":"d29c2f9c-2c9c-47b0-ba24-95c6f60162ea","Type":"ContainerStarted","Data":"5d7576319c05e65944bd167cb8129005de8fa80b34678e36f2cde51e0e1c93b7"}
Nov 25 12:34:29 crc kubenswrapper[4675]: I1125 12:34:29.119582 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:29 crc kubenswrapper[4675]: I1125 12:34:29.142161 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk" podStartSLOduration=1.142124384 podStartE2EDuration="1.142124384s" podCreationTimestamp="2025-11-25 12:34:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:34:29.139803526 +0000 UTC m=+414.311395867" watchObservedRunningTime="2025-11-25 12:34:29.142124384 +0000 UTC m=+414.313716725"
Nov 25 12:34:48 crc kubenswrapper[4675]: I1125 12:34:48.552473 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-rlsqk"
Nov 25 12:34:48 crc kubenswrapper[4675]: I1125 12:34:48.615788 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"]
Nov 25 12:35:13 crc kubenswrapper[4675]: I1125 12:35:13.667500 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" podUID="479f7d01-fd35-4d08-9477-70efd86ed7f0" containerName="registry" containerID="cri-o://51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70" gracePeriod=30
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.001140 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-444qp"
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131126 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131574 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131683 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131757 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131785 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131866 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.131963 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.132021 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rhjg\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg\") pod \"479f7d01-fd35-4d08-9477-70efd86ed7f0\" (UID: \"479f7d01-fd35-4d08-9477-70efd86ed7f0\") "
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.132919 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.133193 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.139871 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg" (OuterVolumeSpecName: "kube-api-access-6rhjg") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "kube-api-access-6rhjg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.140180 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.141427 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.144468 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.145137 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.153130 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "479f7d01-fd35-4d08-9477-70efd86ed7f0" (UID: "479f7d01-fd35-4d08-9477-70efd86ed7f0"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233635 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rhjg\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-kube-api-access-6rhjg\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233679 4675 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/479f7d01-fd35-4d08-9477-70efd86ed7f0-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233694 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233710 4675 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233723 4675 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/479f7d01-fd35-4d08-9477-70efd86ed7f0-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233735 4675 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.233746 4675 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/479f7d01-fd35-4d08-9477-70efd86ed7f0-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.356764 4675 generic.go:334] "Generic (PLEG): container finished" podID="479f7d01-fd35-4d08-9477-70efd86ed7f0" containerID="51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70" exitCode=0
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.356828 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" event={"ID":"479f7d01-fd35-4d08-9477-70efd86ed7f0","Type":"ContainerDied","Data":"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70"}
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.356866 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" event={"ID":"479f7d01-fd35-4d08-9477-70efd86ed7f0","Type":"ContainerDied","Data":"383772e43310e70cd4b9e2a349dd6515103f957e3ded5c0352c11bcf0112f3ad"}
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.356890 4675 scope.go:117] "RemoveContainer" containerID="51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70"
Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.356902 4675 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-444qp" Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.376245 4675 scope.go:117] "RemoveContainer" containerID="51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70" Nov 25 12:35:14 crc kubenswrapper[4675]: E1125 12:35:14.377413 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70\": container with ID starting with 51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70 not found: ID does not exist" containerID="51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70" Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.377498 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70"} err="failed to get container status \"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70\": rpc error: code = NotFound desc = could not find container \"51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70\": container with ID starting with 51bbd3e0e04fd3a8c859b4dd49bdf8fc5373022689d1c16cb0e496870f915e70 not found: ID does not exist" Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.389313 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"] Nov 25 12:35:14 crc kubenswrapper[4675]: I1125 12:35:14.398568 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-444qp"] Nov 25 12:35:15 crc kubenswrapper[4675]: I1125 12:35:15.546675 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="479f7d01-fd35-4d08-9477-70efd86ed7f0" path="/var/lib/kubelet/pods/479f7d01-fd35-4d08-9477-70efd86ed7f0/volumes" Nov 25 12:36:13 crc kubenswrapper[4675]: I1125 12:36:13.662584 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:36:13 crc kubenswrapper[4675]: I1125 12:36:13.663121 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:36:43 crc kubenswrapper[4675]: I1125 12:36:43.661776 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:36:43 crc kubenswrapper[4675]: I1125 12:36:43.662397 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.662858 4675 patch_prober.go:28] interesting 
pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.663962 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.664052 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.665167 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.665238 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b" gracePeriod=600 Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.966520 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b" exitCode=0 Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.966596 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b"} Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.966970 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4"} Nov 25 12:37:13 crc kubenswrapper[4675]: I1125 12:37:13.966997 4675 scope.go:117] "RemoveContainer" containerID="7b2f8711e5ee95d90f413660558825ae64297fb63cbee16834cf385f8eb43f3b" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.014446 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvcg9"] Nov 25 12:38:14 crc kubenswrapper[4675]: E1125 12:38:14.015221 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="479f7d01-fd35-4d08-9477-70efd86ed7f0" containerName="registry" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.015237 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="479f7d01-fd35-4d08-9477-70efd86ed7f0" containerName="registry" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.015346 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="479f7d01-fd35-4d08-9477-70efd86ed7f0" containerName="registry" Nov 25 
12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.015771 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.018651 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-72lbr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.018739 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.019836 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.021586 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6lknr"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.022440 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6lknr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.025980 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wgcdt" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.027148 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvcg9"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.043807 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6lknr"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.046897 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-j2mvp"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.047628 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.051757 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-57d5w" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.073244 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-j2mvp"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.179328 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29nlv\" (UniqueName: \"kubernetes.io/projected/23b012ff-b039-47d5-84d0-276ef8ab953b-kube-api-access-29nlv\") pod \"cert-manager-webhook-5655c58dd6-j2mvp\" (UID: \"23b012ff-b039-47d5-84d0-276ef8ab953b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.179374 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmmph\" (UniqueName: \"kubernetes.io/projected/9624922e-a281-4931-97a5-47ae5c1e78f4-kube-api-access-fmmph\") pod \"cert-manager-cainjector-7f985d654d-mvcg9\" (UID: \"9624922e-a281-4931-97a5-47ae5c1e78f4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.179403 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6k9p\" (UniqueName: \"kubernetes.io/projected/ff650df4-ed32-43ee-99cf-25ea4d4b55d8-kube-api-access-c6k9p\") pod \"cert-manager-5b446d88c5-6lknr\" (UID: \"ff650df4-ed32-43ee-99cf-25ea4d4b55d8\") " pod="cert-manager/cert-manager-5b446d88c5-6lknr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.281320 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6k9p\" (UniqueName: \"kubernetes.io/projected/ff650df4-ed32-43ee-99cf-25ea4d4b55d8-kube-api-access-c6k9p\") pod \"cert-manager-5b446d88c5-6lknr\" (UID: \"ff650df4-ed32-43ee-99cf-25ea4d4b55d8\") " pod="cert-manager/cert-manager-5b446d88c5-6lknr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.281752 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29nlv\" (UniqueName: \"kubernetes.io/projected/23b012ff-b039-47d5-84d0-276ef8ab953b-kube-api-access-29nlv\") pod \"cert-manager-webhook-5655c58dd6-j2mvp\" (UID: \"23b012ff-b039-47d5-84d0-276ef8ab953b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.281911 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmmph\" (UniqueName: \"kubernetes.io/projected/9624922e-a281-4931-97a5-47ae5c1e78f4-kube-api-access-fmmph\") pod \"cert-manager-cainjector-7f985d654d-mvcg9\" (UID: \"9624922e-a281-4931-97a5-47ae5c1e78f4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.299634 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29nlv\" (UniqueName: \"kubernetes.io/projected/23b012ff-b039-47d5-84d0-276ef8ab953b-kube-api-access-29nlv\") pod \"cert-manager-webhook-5655c58dd6-j2mvp\" (UID: \"23b012ff-b039-47d5-84d0-276ef8ab953b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.299690 4675 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-c6k9p\" (UniqueName: \"kubernetes.io/projected/ff650df4-ed32-43ee-99cf-25ea4d4b55d8-kube-api-access-c6k9p\") pod \"cert-manager-5b446d88c5-6lknr\" (UID: \"ff650df4-ed32-43ee-99cf-25ea4d4b55d8\") " pod="cert-manager/cert-manager-5b446d88c5-6lknr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.302987 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmmph\" (UniqueName: \"kubernetes.io/projected/9624922e-a281-4931-97a5-47ae5c1e78f4-kube-api-access-fmmph\") pod \"cert-manager-cainjector-7f985d654d-mvcg9\" (UID: \"9624922e-a281-4931-97a5-47ae5c1e78f4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.340834 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.349886 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-6lknr" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.363162 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.614826 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-j2mvp"] Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.621853 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.857485 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvcg9"] Nov 25 12:38:14 crc kubenswrapper[4675]: W1125 12:38:14.863460 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9624922e_a281_4931_97a5_47ae5c1e78f4.slice/crio-c5b49788caa4d8c4e0b0db4d4542b28ec53e1a0caef713b1f4e7b3098969bef6 WatchSource:0}: Error finding container c5b49788caa4d8c4e0b0db4d4542b28ec53e1a0caef713b1f4e7b3098969bef6: Status 404 returned error can't find the container with id c5b49788caa4d8c4e0b0db4d4542b28ec53e1a0caef713b1f4e7b3098969bef6 Nov 25 12:38:14 crc kubenswrapper[4675]: I1125 12:38:14.875505 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-6lknr"] Nov 25 12:38:15 crc kubenswrapper[4675]: I1125 12:38:15.296707 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" event={"ID":"23b012ff-b039-47d5-84d0-276ef8ab953b","Type":"ContainerStarted","Data":"374415f0c6c24f547ed6da7ccdbc84ff031abcdb6473fcd50483ded9d57e9f7d"} Nov 25 12:38:15 crc kubenswrapper[4675]: I1125 12:38:15.298209 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" event={"ID":"9624922e-a281-4931-97a5-47ae5c1e78f4","Type":"ContainerStarted","Data":"c5b49788caa4d8c4e0b0db4d4542b28ec53e1a0caef713b1f4e7b3098969bef6"} Nov 25 12:38:15 crc kubenswrapper[4675]: I1125 12:38:15.310238 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6lknr" event={"ID":"ff650df4-ed32-43ee-99cf-25ea4d4b55d8","Type":"ContainerStarted","Data":"2a6976e59cc28b70c41169c9c24b32946804bf86a2f5486d169cb24c9648e289"} Nov 25 12:38:18 crc kubenswrapper[4675]: I1125 
12:38:18.335961 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" event={"ID":"23b012ff-b039-47d5-84d0-276ef8ab953b","Type":"ContainerStarted","Data":"a77ac03f12d77c301a04b6cae466dd6703215bb2f14c222911842f4e9296cd50"} Nov 25 12:38:18 crc kubenswrapper[4675]: I1125 12:38:18.336635 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:18 crc kubenswrapper[4675]: I1125 12:38:18.338579 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6lknr" event={"ID":"ff650df4-ed32-43ee-99cf-25ea4d4b55d8","Type":"ContainerStarted","Data":"70cfe4a9a11556d0b69afff67a3fa2a0e632fd4d1453a16ed990f838b5aa0f40"} Nov 25 12:38:18 crc kubenswrapper[4675]: I1125 12:38:18.366072 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" podStartSLOduration=1.733918102 podStartE2EDuration="4.366053149s" podCreationTimestamp="2025-11-25 12:38:14 +0000 UTC" firstStartedPulling="2025-11-25 12:38:14.621654413 +0000 UTC m=+639.793246754" lastFinishedPulling="2025-11-25 12:38:17.25378945 +0000 UTC m=+642.425381801" observedRunningTime="2025-11-25 12:38:18.360266123 +0000 UTC m=+643.531858484" watchObservedRunningTime="2025-11-25 12:38:18.366053149 +0000 UTC m=+643.537645500" Nov 25 12:38:18 crc kubenswrapper[4675]: I1125 12:38:18.381715 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-6lknr" podStartSLOduration=1.951103626 podStartE2EDuration="4.38169668s" podCreationTimestamp="2025-11-25 12:38:14 +0000 UTC" firstStartedPulling="2025-11-25 12:38:14.881980318 +0000 UTC m=+640.053572659" lastFinishedPulling="2025-11-25 12:38:17.312573372 +0000 UTC m=+642.484165713" observedRunningTime="2025-11-25 12:38:18.375318638 +0000 UTC m=+643.546910989" watchObservedRunningTime="2025-11-25 12:38:18.38169668 +0000 UTC m=+643.553289031" Nov 25 12:38:19 crc kubenswrapper[4675]: I1125 12:38:19.345243 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" event={"ID":"9624922e-a281-4931-97a5-47ae5c1e78f4","Type":"ContainerStarted","Data":"aebaf16aba45415c792394b8ff00c09669419d73520fe51a027f50750ac30743"} Nov 25 12:38:19 crc kubenswrapper[4675]: I1125 12:38:19.363297 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" podStartSLOduration=2.158525978 podStartE2EDuration="6.363267581s" podCreationTimestamp="2025-11-25 12:38:13 +0000 UTC" firstStartedPulling="2025-11-25 12:38:14.866129642 +0000 UTC m=+640.037721983" lastFinishedPulling="2025-11-25 12:38:19.070871245 +0000 UTC m=+644.242463586" observedRunningTime="2025-11-25 12:38:19.362665821 +0000 UTC m=+644.534258162" watchObservedRunningTime="2025-11-25 12:38:19.363267581 +0000 UTC m=+644.534859922" Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.730836 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gv9qh"] Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732438 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-controller" containerID="cri-o://82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" gracePeriod=30 Nov 25 
12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732533 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="northd" containerID="cri-o://ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732690 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="sbdb" containerID="cri-o://49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732533 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-node" containerID="cri-o://dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732694 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-acl-logging" containerID="cri-o://8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732850 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.732492 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="nbdb" containerID="cri-o://9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" gracePeriod=30 Nov 25 12:38:23 crc kubenswrapper[4675]: I1125 12:38:23.770404 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" containerID="cri-o://105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" gracePeriod=30 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.067202 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/3.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.069800 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovn-acl-logging/0.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.070318 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovn-controller/0.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.070721 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127183 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rswdb"] Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127436 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-acl-logging" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127454 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-acl-logging" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127467 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127474 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127491 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-node" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127501 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-node" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127511 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127518 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127529 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127537 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127546 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kubecfg-setup" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127554 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kubecfg-setup" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127562 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127569 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127578 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127585 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127594 4675 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="sbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127601 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="sbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127611 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="nbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127617 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="nbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127626 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="northd" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127632 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="northd" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127738 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="sbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127748 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127757 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-node" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127766 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127773 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="nbdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127783 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127795 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="northd" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127805 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127831 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127840 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127851 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovn-acl-logging" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.127861 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.127992 4675 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.128002 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.128014 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.128021 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerName="ovnkube-controller" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.130045 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207167 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207221 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207257 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207284 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207337 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207365 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207381 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash" (OuterVolumeSpecName: "host-slash") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207393 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207412 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207420 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket" (OuterVolumeSpecName: "log-socket") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207490 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrftd\" (UniqueName: \"kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207532 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207473 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207558 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207590 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207620 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207635 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207691 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207712 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207722 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207760 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207784 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207805 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207897 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207908 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207935 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207950 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log\") pod \"e5671459-4981-4259-a31d-595dd6f1f4b3\" (UID: \"e5671459-4981-4259-a31d-595dd6f1f4b3\") " Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208100 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-bin\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208128 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-var-lib-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208151 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-node-log\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208177 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-config\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207935 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207952 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). 
InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208200 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-slash\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207970 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208230 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207986 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.207314 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208244 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-netns\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208158 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208182 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log" (OuterVolumeSpecName: "node-log") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208178 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208296 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-env-overrides\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208301 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208319 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-script-lib\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208356 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208382 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208400 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmd6z\" (UniqueName: \"kubernetes.io/projected/d5ea0e72-101d-42b2-b166-35bd5d744684-kube-api-access-gmd6z\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208419 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5ea0e72-101d-42b2-b166-35bd5d744684-ovn-node-metrics-cert\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208438 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-netd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208485 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-etc-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208519 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-log-socket\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208544 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-ovn\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208571 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-systemd-units\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208593 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208607 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-systemd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208624 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-kubelet\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208661 4675 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208671 4675 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208679 4675 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208687 4675 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208695 4675 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208703 4675 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208711 4675 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208719 4675 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208729 4675 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208738 4675 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208747 4675 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208757 4675 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208767 4675 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208776 4675 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e5671459-4981-4259-a31d-595dd6f1f4b3-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208784 4675 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208792 4675 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.208801 4675 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.216213 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.217292 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd" (OuterVolumeSpecName: "kube-api-access-wrftd") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "kube-api-access-wrftd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.233249 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "e5671459-4981-4259-a31d-595dd6f1f4b3" (UID: "e5671459-4981-4259-a31d-595dd6f1f4b3"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309589 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-log-socket\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309648 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-ovn\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309680 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-systemd-units\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309703 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309720 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-systemd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309740 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-kubelet\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309768 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-bin\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309787 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-var-lib-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309806 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-node-log\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309905 
4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-config\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309932 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-slash\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309954 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-netns\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.309978 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-env-overrides\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310002 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-script-lib\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310029 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310050 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310073 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmd6z\" (UniqueName: \"kubernetes.io/projected/d5ea0e72-101d-42b2-b166-35bd5d744684-kube-api-access-gmd6z\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310090 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-bin\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310090 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-slash\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310104 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5ea0e72-101d-42b2-b166-35bd5d744684-ovn-node-metrics-cert\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310215 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-kubelet\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310245 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-ovn\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310293 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-netd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310328 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-netns\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310348 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-node-log\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310871 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-env-overrides\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310909 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-log-socket\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310932 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-var-lib-openvswitch\") pod \"ovnkube-node-rswdb\" 
(UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.310960 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-cni-netd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311000 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-config\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311074 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-etc-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311117 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-etc-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311209 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-openvswitch\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311232 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-run-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311259 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311311 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-systemd-units\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311343 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d5ea0e72-101d-42b2-b166-35bd5d744684-run-systemd\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311366 4675 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e5671459-4981-4259-a31d-595dd6f1f4b3-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311383 4675 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e5671459-4981-4259-a31d-595dd6f1f4b3-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311397 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrftd\" (UniqueName: \"kubernetes.io/projected/e5671459-4981-4259-a31d-595dd6f1f4b3-kube-api-access-wrftd\") on node \"crc\" DevicePath \"\"" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.311808 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d5ea0e72-101d-42b2-b166-35bd5d744684-ovnkube-script-lib\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.315025 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d5ea0e72-101d-42b2-b166-35bd5d744684-ovn-node-metrics-cert\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.330147 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmd6z\" (UniqueName: \"kubernetes.io/projected/d5ea0e72-101d-42b2-b166-35bd5d744684-kube-api-access-gmd6z\") pod \"ovnkube-node-rswdb\" (UID: \"d5ea0e72-101d-42b2-b166-35bd5d744684\") " pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.365931 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-j2mvp" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.382597 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/2.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.383136 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/1.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.383189 4675 generic.go:334] "Generic (PLEG): container finished" podID="ede74da4-0d3a-463f-a591-b722f62358c8" containerID="ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd" exitCode=2 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.383243 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerDied","Data":"ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.383278 4675 scope.go:117] "RemoveContainer" containerID="ea99a42f079450225311570cfae89149e564f901622857914047797e1cec5533" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.384134 4675 scope.go:117] "RemoveContainer" 
containerID="ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.384352 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-cgbpj_openshift-multus(ede74da4-0d3a-463f-a591-b722f62358c8)\"" pod="openshift-multus/multus-cgbpj" podUID="ede74da4-0d3a-463f-a591-b722f62358c8" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.389123 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovnkube-controller/3.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.392182 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovn-acl-logging/0.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.392728 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gv9qh_e5671459-4981-4259-a31d-595dd6f1f4b3/ovn-controller/0.log" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393158 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393183 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393192 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393201 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393210 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393218 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" exitCode=0 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393226 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" exitCode=143 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393236 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5671459-4981-4259-a31d-595dd6f1f4b3" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" exitCode=143 Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393258 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 
25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393288 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393302 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393317 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393330 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393262 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393343 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393355 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393369 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393377 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393385 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393391 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393398 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393405 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:38:24 crc 
kubenswrapper[4675]: I1125 12:38:24.393412 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393418 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393425 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393435 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393446 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393455 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393462 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393469 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393477 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393484 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393490 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393501 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393507 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393515 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} Nov 25 12:38:24 crc 
kubenswrapper[4675]: I1125 12:38:24.393524 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393534 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393543 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393550 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393556 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393563 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393570 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393577 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393584 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393591 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393597 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393605 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gv9qh" event={"ID":"e5671459-4981-4259-a31d-595dd6f1f4b3","Type":"ContainerDied","Data":"12600a85ff57348c06651f35503103bfc376b08171c8dad4d0502c5a9d32711a"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393615 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393622 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393628 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393635 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393641 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393647 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393654 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393660 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393666 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.393672 4675 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.418414 4675 scope.go:117] "RemoveContainer" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.441030 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gv9qh"] Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.441673 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.443240 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gv9qh"] Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.445726 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.465880 4675 scope.go:117] "RemoveContainer" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.480146 4675 scope.go:117] "RemoveContainer" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.503078 4675 scope.go:117] "RemoveContainer" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.532404 4675 scope.go:117] "RemoveContainer" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.553600 4675 scope.go:117] "RemoveContainer" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.571479 4675 scope.go:117] "RemoveContainer" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.587332 4675 scope.go:117] "RemoveContainer" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.628919 4675 scope.go:117] "RemoveContainer" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.644510 4675 scope.go:117] "RemoveContainer" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.645291 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": container with ID starting with 105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92 not found: ID does not exist" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.645388 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} err="failed to get container status \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": rpc error: code = NotFound desc = could not find container \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": container with ID starting with 105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.645416 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.645789 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": container with ID starting with 958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae not found: ID does not exist" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.645840 4675 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} err="failed to get container status \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": rpc error: code = NotFound desc = could not find container \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": container with ID starting with 958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.645856 4675 scope.go:117] "RemoveContainer" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.646203 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": container with ID starting with 49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419 not found: ID does not exist" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646223 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} err="failed to get container status \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": rpc error: code = NotFound desc = could not find container \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": container with ID starting with 49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646241 4675 scope.go:117] "RemoveContainer" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.646516 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": container with ID starting with 9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818 not found: ID does not exist" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646540 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} err="failed to get container status \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": rpc error: code = NotFound desc = could not find container \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": container with ID starting with 9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646556 4675 scope.go:117] "RemoveContainer" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.646767 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": container with ID starting with ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6 not found: ID does not exist" 
containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646789 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} err="failed to get container status \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": rpc error: code = NotFound desc = could not find container \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": container with ID starting with ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.646846 4675 scope.go:117] "RemoveContainer" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.647168 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": container with ID starting with 9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542 not found: ID does not exist" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647188 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} err="failed to get container status \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": rpc error: code = NotFound desc = could not find container \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": container with ID starting with 9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647202 4675 scope.go:117] "RemoveContainer" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.647415 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": container with ID starting with dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84 not found: ID does not exist" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647444 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} err="failed to get container status \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": rpc error: code = NotFound desc = could not find container \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": container with ID starting with dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647461 4675 scope.go:117] "RemoveContainer" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.647769 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": container with ID starting with 8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b not found: ID does not exist" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647786 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} err="failed to get container status \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": rpc error: code = NotFound desc = could not find container \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": container with ID starting with 8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.647831 4675 scope.go:117] "RemoveContainer" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.648152 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": container with ID starting with 82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08 not found: ID does not exist" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.648174 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} err="failed to get container status \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": rpc error: code = NotFound desc = could not find container \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": container with ID starting with 82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.648189 4675 scope.go:117] "RemoveContainer" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: E1125 12:38:24.648388 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": container with ID starting with 1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88 not found: ID does not exist" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.648406 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} err="failed to get container status \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": rpc error: code = NotFound desc = could not find container \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": container with ID starting with 1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.648419 4675 scope.go:117] "RemoveContainer" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc 
kubenswrapper[4675]: I1125 12:38:24.648663 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} err="failed to get container status \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": rpc error: code = NotFound desc = could not find container \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": container with ID starting with 105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.648683 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649060 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} err="failed to get container status \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": rpc error: code = NotFound desc = could not find container \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": container with ID starting with 958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649082 4675 scope.go:117] "RemoveContainer" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649449 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} err="failed to get container status \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": rpc error: code = NotFound desc = could not find container \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": container with ID starting with 49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649499 4675 scope.go:117] "RemoveContainer" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649871 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} err="failed to get container status \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": rpc error: code = NotFound desc = could not find container \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": container with ID starting with 9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.649922 4675 scope.go:117] "RemoveContainer" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650326 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} err="failed to get container status \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": rpc error: code = NotFound desc = could not find container \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": container with ID 
starting with ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650343 4675 scope.go:117] "RemoveContainer" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650601 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} err="failed to get container status \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": rpc error: code = NotFound desc = could not find container \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": container with ID starting with 9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650618 4675 scope.go:117] "RemoveContainer" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650840 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} err="failed to get container status \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": rpc error: code = NotFound desc = could not find container \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": container with ID starting with dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.650867 4675 scope.go:117] "RemoveContainer" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651081 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} err="failed to get container status \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": rpc error: code = NotFound desc = could not find container \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": container with ID starting with 8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651103 4675 scope.go:117] "RemoveContainer" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651428 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} err="failed to get container status \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": rpc error: code = NotFound desc = could not find container \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": container with ID starting with 82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651454 4675 scope.go:117] "RemoveContainer" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651738 4675 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} err="failed to get container status \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": rpc error: code = NotFound desc = could not find container \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": container with ID starting with 1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.651760 4675 scope.go:117] "RemoveContainer" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652079 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} err="failed to get container status \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": rpc error: code = NotFound desc = could not find container \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": container with ID starting with 105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652116 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652517 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} err="failed to get container status \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": rpc error: code = NotFound desc = could not find container \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": container with ID starting with 958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652541 4675 scope.go:117] "RemoveContainer" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652802 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} err="failed to get container status \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": rpc error: code = NotFound desc = could not find container \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": container with ID starting with 49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.652842 4675 scope.go:117] "RemoveContainer" containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653096 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} err="failed to get container status \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": rpc error: code = NotFound desc = could not find container \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": container with ID starting with 9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818 not found: ID does not exist" Nov 
25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653114 4675 scope.go:117] "RemoveContainer" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653345 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} err="failed to get container status \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": rpc error: code = NotFound desc = could not find container \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": container with ID starting with ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653362 4675 scope.go:117] "RemoveContainer" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653587 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} err="failed to get container status \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": rpc error: code = NotFound desc = could not find container \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": container with ID starting with 9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653615 4675 scope.go:117] "RemoveContainer" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653887 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} err="failed to get container status \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": rpc error: code = NotFound desc = could not find container \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": container with ID starting with dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.653909 4675 scope.go:117] "RemoveContainer" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654233 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} err="failed to get container status \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": rpc error: code = NotFound desc = could not find container \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": container with ID starting with 8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654251 4675 scope.go:117] "RemoveContainer" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654495 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} err="failed to get container status 
\"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": rpc error: code = NotFound desc = could not find container \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": container with ID starting with 82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654520 4675 scope.go:117] "RemoveContainer" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654716 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} err="failed to get container status \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": rpc error: code = NotFound desc = could not find container \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": container with ID starting with 1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654737 4675 scope.go:117] "RemoveContainer" containerID="105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654946 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92"} err="failed to get container status \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": rpc error: code = NotFound desc = could not find container \"105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92\": container with ID starting with 105e18972d73caa6afec30afb589c70f97b0ae8ac068a9aeb2761a1353a23f92 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.654968 4675 scope.go:117] "RemoveContainer" containerID="958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655207 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae"} err="failed to get container status \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": rpc error: code = NotFound desc = could not find container \"958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae\": container with ID starting with 958240d2f500af8114515ece73b31678da198b3401f39ca94a607fa5db71a7ae not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655232 4675 scope.go:117] "RemoveContainer" containerID="49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655660 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419"} err="failed to get container status \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": rpc error: code = NotFound desc = could not find container \"49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419\": container with ID starting with 49113527bae4d814401b146f44fcdcf678a989f0c7429da709e010207b196419 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655713 4675 scope.go:117] "RemoveContainer" 
containerID="9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655968 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818"} err="failed to get container status \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": rpc error: code = NotFound desc = could not find container \"9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818\": container with ID starting with 9663120eccf5a7503bb3feaf9f186e5a30402a306be0e9736ec3e99898ddb818 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.655995 4675 scope.go:117] "RemoveContainer" containerID="ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656292 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6"} err="failed to get container status \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": rpc error: code = NotFound desc = could not find container \"ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6\": container with ID starting with ea621708c61f06ed1116b36425d87f16c407a357caf1d392936036e97d6938a6 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656315 4675 scope.go:117] "RemoveContainer" containerID="9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656501 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542"} err="failed to get container status \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": rpc error: code = NotFound desc = could not find container \"9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542\": container with ID starting with 9afe8e8209531f7e09707723c4193c253c895dd223edada1fcb5b6a198803542 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656525 4675 scope.go:117] "RemoveContainer" containerID="dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656760 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84"} err="failed to get container status \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": rpc error: code = NotFound desc = could not find container \"dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84\": container with ID starting with dbde24bc6c126fa9bcd53930b81db63fb5a06c6cfc09fb58cec7204251300d84 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.656780 4675 scope.go:117] "RemoveContainer" containerID="8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.657037 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b"} err="failed to get container status \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": rpc error: code = NotFound desc = could not find 
container \"8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b\": container with ID starting with 8240d82fc962651dc715e190378a68dc14b8821ef5f8c81014d901c805e2936b not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.657056 4675 scope.go:117] "RemoveContainer" containerID="82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.657394 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08"} err="failed to get container status \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": rpc error: code = NotFound desc = could not find container \"82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08\": container with ID starting with 82f19b1b335fc7573777fc2254318c5a8fcfe44d05a1c665b6186ca105d38c08 not found: ID does not exist" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.657416 4675 scope.go:117] "RemoveContainer" containerID="1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88" Nov 25 12:38:24 crc kubenswrapper[4675]: I1125 12:38:24.657717 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88"} err="failed to get container status \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": rpc error: code = NotFound desc = could not find container \"1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88\": container with ID starting with 1a4073f6dea3ff722667009c8272678a2a347b36ce005f3ca4bd1339cc46ce88 not found: ID does not exist" Nov 25 12:38:25 crc kubenswrapper[4675]: I1125 12:38:25.401552 4675 generic.go:334] "Generic (PLEG): container finished" podID="d5ea0e72-101d-42b2-b166-35bd5d744684" containerID="a8439c9dc3508d6269b781bf8aa632a1f71f75e3b23bd07564bc0d8409552797" exitCode=0 Nov 25 12:38:25 crc kubenswrapper[4675]: I1125 12:38:25.401641 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerDied","Data":"a8439c9dc3508d6269b781bf8aa632a1f71f75e3b23bd07564bc0d8409552797"} Nov 25 12:38:25 crc kubenswrapper[4675]: I1125 12:38:25.402736 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"7d6d7cd8cc11817d422a365c73737b315712816c674652b536cc0b4ef256fe96"} Nov 25 12:38:25 crc kubenswrapper[4675]: I1125 12:38:25.404397 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/2.log" Nov 25 12:38:25 crc kubenswrapper[4675]: I1125 12:38:25.539354 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5671459-4981-4259-a31d-595dd6f1f4b3" path="/var/lib/kubelet/pods/e5671459-4981-4259-a31d-595dd6f1f4b3/volumes" Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.412935 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"2489694a0e1544245866ca6e738c6e0bf759d3a75556fb143c54b83df1f27239"} Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.413267 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"a5ac17d1bc847d4f7b038fa8a06f3c8cffda72106e95b2ee9d819932d346cfcf"} Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.413283 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"11fb9ae3b38b0ad59d0733849daa0b9397aed254a3c1d6a2f14fc2171ceca3b5"} Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.413294 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"0446cdbfff84fd25da3f13318aa3322920eea1d1534783143948b3477492a268"} Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.413312 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"bf6ce6360bf473bac674899c1059eaea71be948a6197ea6dfbb3dc8483deb7d3"} Nov 25 12:38:26 crc kubenswrapper[4675]: I1125 12:38:26.413322 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"a65e66ef60df9e19cdc1d380cc10e07bf2db31619b0d37d199a1f089c350739e"} Nov 25 12:38:28 crc kubenswrapper[4675]: I1125 12:38:28.429575 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"c1c818d36e749b874fb9f0709ca5b1002b284b3669a2499f4ddcd05eae7467bd"} Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.448717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" event={"ID":"d5ea0e72-101d-42b2-b166-35bd5d744684","Type":"ContainerStarted","Data":"09c00711cb4480a411a68e2b2d48cf2b9e9747999a295cac2fdf1e4c46b43ef4"} Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.451904 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.451943 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.451959 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.497738 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" podStartSLOduration=7.497718091 podStartE2EDuration="7.497718091s" podCreationTimestamp="2025-11-25 12:38:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:38:31.493400543 +0000 UTC m=+656.664992884" watchObservedRunningTime="2025-11-25 12:38:31.497718091 +0000 UTC m=+656.669310432" Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.501791 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:31 crc kubenswrapper[4675]: I1125 12:38:31.515602 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:38 crc kubenswrapper[4675]: I1125 12:38:38.532473 4675 scope.go:117] "RemoveContainer" containerID="ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd" Nov 25 12:38:38 crc kubenswrapper[4675]: E1125 12:38:38.533535 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-cgbpj_openshift-multus(ede74da4-0d3a-463f-a591-b722f62358c8)\"" pod="openshift-multus/multus-cgbpj" podUID="ede74da4-0d3a-463f-a591-b722f62358c8" Nov 25 12:38:53 crc kubenswrapper[4675]: I1125 12:38:53.532419 4675 scope.go:117] "RemoveContainer" containerID="ee35c51144c4041593a77cf4bb532a23c3a6c5b4fcd31a566eca138a5f16e8dd" Nov 25 12:38:54 crc kubenswrapper[4675]: I1125 12:38:54.470271 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rswdb" Nov 25 12:38:54 crc kubenswrapper[4675]: I1125 12:38:54.648618 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-cgbpj_ede74da4-0d3a-463f-a591-b722f62358c8/kube-multus/2.log" Nov 25 12:38:54 crc kubenswrapper[4675]: I1125 12:38:54.648684 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-cgbpj" event={"ID":"ede74da4-0d3a-463f-a591-b722f62358c8","Type":"ContainerStarted","Data":"03003e2345bc3a5fcb36fe0e5934f64a51a22d0a9c4dd4869e259c988e07fe92"} Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.382330 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x"] Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.384333 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.386621 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.395251 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x"] Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.542042 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.542093 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.542242 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gmhj\" (UniqueName: \"kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.643982 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.644052 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.644130 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gmhj\" (UniqueName: \"kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.644680 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.644728 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.669696 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gmhj\" (UniqueName: \"kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.701717 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:04 crc kubenswrapper[4675]: I1125 12:39:04.873134 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x"] Nov 25 12:39:05 crc kubenswrapper[4675]: I1125 12:39:05.705133 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerStarted","Data":"2de842e0177ddf49e3c9b385127ad3bce8273fd59498e2c059acf65216d20bf4"} Nov 25 12:39:05 crc kubenswrapper[4675]: I1125 12:39:05.705465 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerStarted","Data":"f8a741ae9f1fe52688c103ad75254888655a749b0b73b3f9ae3e84e350604098"} Nov 25 12:39:06 crc kubenswrapper[4675]: I1125 12:39:06.711328 4675 generic.go:334] "Generic (PLEG): container finished" podID="5307bc14-522d-4a40-a135-8ba280d2202f" containerID="2de842e0177ddf49e3c9b385127ad3bce8273fd59498e2c059acf65216d20bf4" exitCode=0 Nov 25 12:39:06 crc kubenswrapper[4675]: I1125 12:39:06.711374 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerDied","Data":"2de842e0177ddf49e3c9b385127ad3bce8273fd59498e2c059acf65216d20bf4"} Nov 25 12:39:10 crc kubenswrapper[4675]: I1125 12:39:10.732533 4675 generic.go:334] "Generic (PLEG): container finished" podID="5307bc14-522d-4a40-a135-8ba280d2202f" containerID="584673ccf6e8629177a6b355bfb03aefd6c9b9e95ff54f0a0a1f61b9982c9833" exitCode=0 Nov 25 12:39:10 crc kubenswrapper[4675]: I1125 12:39:10.732629 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" 
event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerDied","Data":"584673ccf6e8629177a6b355bfb03aefd6c9b9e95ff54f0a0a1f61b9982c9833"} Nov 25 12:39:11 crc kubenswrapper[4675]: I1125 12:39:11.741433 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerStarted","Data":"fe352975e2110431bd32adf122d912b3b8dd123f848c232e524c71399200cfa4"} Nov 25 12:39:11 crc kubenswrapper[4675]: I1125 12:39:11.761192 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" podStartSLOduration=4.641129398 podStartE2EDuration="7.761173106s" podCreationTimestamp="2025-11-25 12:39:04 +0000 UTC" firstStartedPulling="2025-11-25 12:39:06.713074372 +0000 UTC m=+691.884666733" lastFinishedPulling="2025-11-25 12:39:09.8331181 +0000 UTC m=+695.004710441" observedRunningTime="2025-11-25 12:39:11.756644752 +0000 UTC m=+696.928237103" watchObservedRunningTime="2025-11-25 12:39:11.761173106 +0000 UTC m=+696.932765467" Nov 25 12:39:12 crc kubenswrapper[4675]: I1125 12:39:12.751769 4675 generic.go:334] "Generic (PLEG): container finished" podID="5307bc14-522d-4a40-a135-8ba280d2202f" containerID="fe352975e2110431bd32adf122d912b3b8dd123f848c232e524c71399200cfa4" exitCode=0 Nov 25 12:39:12 crc kubenswrapper[4675]: I1125 12:39:12.751975 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerDied","Data":"fe352975e2110431bd32adf122d912b3b8dd123f848c232e524c71399200cfa4"} Nov 25 12:39:13 crc kubenswrapper[4675]: I1125 12:39:13.662628 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:39:13 crc kubenswrapper[4675]: I1125 12:39:13.662684 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:39:13 crc kubenswrapper[4675]: I1125 12:39:13.951416 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.151318 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gmhj\" (UniqueName: \"kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj\") pod \"5307bc14-522d-4a40-a135-8ba280d2202f\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.151366 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util\") pod \"5307bc14-522d-4a40-a135-8ba280d2202f\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.151393 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle\") pod \"5307bc14-522d-4a40-a135-8ba280d2202f\" (UID: \"5307bc14-522d-4a40-a135-8ba280d2202f\") " Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.152227 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle" (OuterVolumeSpecName: "bundle") pod "5307bc14-522d-4a40-a135-8ba280d2202f" (UID: "5307bc14-522d-4a40-a135-8ba280d2202f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.157077 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj" (OuterVolumeSpecName: "kube-api-access-8gmhj") pod "5307bc14-522d-4a40-a135-8ba280d2202f" (UID: "5307bc14-522d-4a40-a135-8ba280d2202f"). InnerVolumeSpecName "kube-api-access-8gmhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.165155 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util" (OuterVolumeSpecName: "util") pod "5307bc14-522d-4a40-a135-8ba280d2202f" (UID: "5307bc14-522d-4a40-a135-8ba280d2202f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.252401 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gmhj\" (UniqueName: \"kubernetes.io/projected/5307bc14-522d-4a40-a135-8ba280d2202f-kube-api-access-8gmhj\") on node \"crc\" DevicePath \"\"" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.252446 4675 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-util\") on node \"crc\" DevicePath \"\"" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.252458 4675 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5307bc14-522d-4a40-a135-8ba280d2202f-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.763702 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" event={"ID":"5307bc14-522d-4a40-a135-8ba280d2202f","Type":"ContainerDied","Data":"f8a741ae9f1fe52688c103ad75254888655a749b0b73b3f9ae3e84e350604098"} Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.763738 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8a741ae9f1fe52688c103ad75254888655a749b0b73b3f9ae3e84e350604098" Nov 25 12:39:14 crc kubenswrapper[4675]: I1125 12:39:14.763790 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.970808 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-tld8t"] Nov 25 12:39:20 crc kubenswrapper[4675]: E1125 12:39:20.971476 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="util" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.971487 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="util" Nov 25 12:39:20 crc kubenswrapper[4675]: E1125 12:39:20.971501 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="pull" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.971506 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="pull" Nov 25 12:39:20 crc kubenswrapper[4675]: E1125 12:39:20.971516 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="extract" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.971523 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="extract" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.971606 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="5307bc14-522d-4a40-a135-8ba280d2202f" containerName="extract" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.972018 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.974382 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-p9gx9" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.974519 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.975339 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 12:39:20 crc kubenswrapper[4675]: I1125 12:39:20.985120 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-tld8t"] Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.132575 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n4gf\" (UniqueName: \"kubernetes.io/projected/669a3ab6-17cd-4b0e-8498-bbf3bd4041f4-kube-api-access-5n4gf\") pod \"nmstate-operator-557fdffb88-tld8t\" (UID: \"669a3ab6-17cd-4b0e-8498-bbf3bd4041f4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.233976 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n4gf\" (UniqueName: \"kubernetes.io/projected/669a3ab6-17cd-4b0e-8498-bbf3bd4041f4-kube-api-access-5n4gf\") pod \"nmstate-operator-557fdffb88-tld8t\" (UID: \"669a3ab6-17cd-4b0e-8498-bbf3bd4041f4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.252835 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n4gf\" (UniqueName: \"kubernetes.io/projected/669a3ab6-17cd-4b0e-8498-bbf3bd4041f4-kube-api-access-5n4gf\") pod \"nmstate-operator-557fdffb88-tld8t\" (UID: \"669a3ab6-17cd-4b0e-8498-bbf3bd4041f4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.290896 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.480765 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-tld8t"] Nov 25 12:39:21 crc kubenswrapper[4675]: I1125 12:39:21.795945 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" event={"ID":"669a3ab6-17cd-4b0e-8498-bbf3bd4041f4","Type":"ContainerStarted","Data":"0d947106b365227126dbe3e92bf02c6f9d43a22883f2e3d631509de95e6879d2"} Nov 25 12:39:27 crc kubenswrapper[4675]: I1125 12:39:27.827693 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" event={"ID":"669a3ab6-17cd-4b0e-8498-bbf3bd4041f4","Type":"ContainerStarted","Data":"f815705b73f5126f28be6cf11284315316fe588cdfd24f2ee97c52bbb20306b9"} Nov 25 12:39:28 crc kubenswrapper[4675]: I1125 12:39:28.848476 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-tld8t" podStartSLOduration=2.786206368 podStartE2EDuration="8.848457371s" podCreationTimestamp="2025-11-25 12:39:20 +0000 UTC" firstStartedPulling="2025-11-25 12:39:21.492649081 +0000 UTC m=+706.664241422" lastFinishedPulling="2025-11-25 12:39:27.554900074 +0000 UTC m=+712.726492425" observedRunningTime="2025-11-25 12:39:28.847067559 +0000 UTC m=+714.018659920" watchObservedRunningTime="2025-11-25 12:39:28.848457371 +0000 UTC m=+714.020049712" Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.733599 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"] Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.734434 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx" Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.736780 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-qlfqc" Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.763238 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"] Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.774903 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"] Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.775702 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2" Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.785207 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.797066 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-xzw2f"] Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.797701 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.815147 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"]
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.912495 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"]
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.913261 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.918762 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-2wl6d"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.919392 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.927242 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"]
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.930139 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931105 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931184 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6btk6\" (UniqueName: \"kubernetes.io/projected/48952822-4273-4558-b07e-ad6e9e80dbdf-kube-api-access-6btk6\") pod \"nmstate-metrics-5dcf9c57c5-gwgnx\" (UID: \"48952822-4273-4558-b07e-ad6e9e80dbdf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931212 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-dbus-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931234 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931264 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-ovs-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931286 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7qw5\" (UniqueName: \"kubernetes.io/projected/83ed5cc3-8ce1-4765-b975-c7a543434c95-kube-api-access-z7qw5\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931306 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-nmstate-lock\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931325 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs8pp\" (UniqueName: \"kubernetes.io/projected/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-kube-api-access-fs8pp\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931346 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6975c\" (UniqueName: \"kubernetes.io/projected/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-kube-api-access-6975c\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:29 crc kubenswrapper[4675]: I1125 12:39:29.931378 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032081 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6btk6\" (UniqueName: \"kubernetes.io/projected/48952822-4273-4558-b07e-ad6e9e80dbdf-kube-api-access-6btk6\") pod \"nmstate-metrics-5dcf9c57c5-gwgnx\" (UID: \"48952822-4273-4558-b07e-ad6e9e80dbdf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032123 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-dbus-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032144 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032170 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-ovs-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032191 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7qw5\" (UniqueName: \"kubernetes.io/projected/83ed5cc3-8ce1-4765-b975-c7a543434c95-kube-api-access-z7qw5\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032207 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-nmstate-lock\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032223 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs8pp\" (UniqueName: \"kubernetes.io/projected/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-kube-api-access-fs8pp\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032241 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6975c\" (UniqueName: \"kubernetes.io/projected/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-kube-api-access-6975c\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032265 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032286 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032422 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-dbus-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032595 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-ovs-socket\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.032674 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/83ed5cc3-8ce1-4765-b975-c7a543434c95-nmstate-lock\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.033046 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.059665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs8pp\" (UniqueName: \"kubernetes.io/projected/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-kube-api-access-fs8pp\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.065617 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6975c\" (UniqueName: \"kubernetes.io/projected/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-kube-api-access-6975c\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.070330 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/fff162cc-a7a0-4cbb-930f-7867fbb1cf70-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-z8hd2\" (UID: \"fff162cc-a7a0-4cbb-930f-7867fbb1cf70\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.078557 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c147bbe0-eb8e-44ff-b4e2-271af218f1ff-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2l8n5\" (UID: \"c147bbe0-eb8e-44ff-b4e2-271af218f1ff\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.101299 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6btk6\" (UniqueName: \"kubernetes.io/projected/48952822-4273-4558-b07e-ad6e9e80dbdf-kube-api-access-6btk6\") pod \"nmstate-metrics-5dcf9c57c5-gwgnx\" (UID: \"48952822-4273-4558-b07e-ad6e9e80dbdf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.120111 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.154273 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7qw5\" (UniqueName: \"kubernetes.io/projected/83ed5cc3-8ce1-4765-b975-c7a543434c95-kube-api-access-z7qw5\") pod \"nmstate-handler-xzw2f\" (UID: \"83ed5cc3-8ce1-4765-b975-c7a543434c95\") " pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.217845 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6d56f8f4ff-7qprj"]
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.218666 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.232121 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234473 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-oauth-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234641 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234710 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-oauth-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234788 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75c94\" (UniqueName: \"kubernetes.io/projected/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-kube-api-access-75c94\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234875 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-trusted-ca-bundle\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.234965 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.235035 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-service-ca\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.243102 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6d56f8f4ff-7qprj"]
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340108 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340347 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-service-ca\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340387 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-oauth-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340405 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340430 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-oauth-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340468 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75c94\" (UniqueName: \"kubernetes.io/projected/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-kube-api-access-75c94\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.340496 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-trusted-ca-bundle\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.341400 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-oauth-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.341940 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.342421 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-service-ca\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.343655 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-trusted-ca-bundle\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.348143 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.350801 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-serving-cert\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.352155 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-console-oauth-config\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.364843 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75c94\" (UniqueName: \"kubernetes.io/projected/0b86e6d4-6107-4f2f-8731-cbfc74c885ac-kube-api-access-75c94\") pod \"console-6d56f8f4ff-7qprj\" (UID: \"0b86e6d4-6107-4f2f-8731-cbfc74c885ac\") " pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.423259 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.469654 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"]
Nov 25 12:39:30 crc kubenswrapper[4675]: W1125 12:39:30.487618 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfff162cc_a7a0_4cbb_930f_7867fbb1cf70.slice/crio-3d49ff49e4eb1aca5fb5aa882326856708c4f15f3045499eba816b23184bc906 WatchSource:0}: Error finding container 3d49ff49e4eb1aca5fb5aa882326856708c4f15f3045499eba816b23184bc906: Status 404 returned error can't find the container with id 3d49ff49e4eb1aca5fb5aa882326856708c4f15f3045499eba816b23184bc906
Nov 25 12:39:30 crc kubenswrapper[4675]: W1125 12:39:30.500407 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83ed5cc3_8ce1_4765_b975_c7a543434c95.slice/crio-c1c808a0144cc8b993590b09e282dd80f08753280e932bf97af6583c935b1fc9 WatchSource:0}: Error finding container c1c808a0144cc8b993590b09e282dd80f08753280e932bf97af6583c935b1fc9: Status 404 returned error can't find the container with id c1c808a0144cc8b993590b09e282dd80f08753280e932bf97af6583c935b1fc9
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.544090 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.576290 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx"]
Nov 25 12:39:30 crc kubenswrapper[4675]: W1125 12:39:30.581906 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48952822_4273_4558_b07e_ad6e9e80dbdf.slice/crio-d7e806898c17709f5ab2e0d1bf455da30fcd436b0b078f2466696bb30a758be2 WatchSource:0}: Error finding container d7e806898c17709f5ab2e0d1bf455da30fcd436b0b078f2466696bb30a758be2: Status 404 returned error can't find the container with id d7e806898c17709f5ab2e0d1bf455da30fcd436b0b078f2466696bb30a758be2
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.722329 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5"]
Nov 25 12:39:30 crc kubenswrapper[4675]: W1125 12:39:30.732479 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc147bbe0_eb8e_44ff_b4e2_271af218f1ff.slice/crio-1701c213c4b54f647cf96dbd18ff69c3db8c8ad7637452cf2f60561150685b32 WatchSource:0}: Error finding container 1701c213c4b54f647cf96dbd18ff69c3db8c8ad7637452cf2f60561150685b32: Status 404 returned error can't find the container with id 1701c213c4b54f647cf96dbd18ff69c3db8c8ad7637452cf2f60561150685b32
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.749838 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6d56f8f4ff-7qprj"]
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.854040 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx" event={"ID":"48952822-4273-4558-b07e-ad6e9e80dbdf","Type":"ContainerStarted","Data":"d7e806898c17709f5ab2e0d1bf455da30fcd436b0b078f2466696bb30a758be2"}
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.855505 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6d56f8f4ff-7qprj" event={"ID":"0b86e6d4-6107-4f2f-8731-cbfc74c885ac","Type":"ContainerStarted","Data":"09dc3329ec8e925f2a47297dd8c36ea479dc8f650aa598230b3e230cf0f87b2e"}
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.856262 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xzw2f" event={"ID":"83ed5cc3-8ce1-4765-b975-c7a543434c95","Type":"ContainerStarted","Data":"c1c808a0144cc8b993590b09e282dd80f08753280e932bf97af6583c935b1fc9"}
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.857074 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5" event={"ID":"c147bbe0-eb8e-44ff-b4e2-271af218f1ff","Type":"ContainerStarted","Data":"1701c213c4b54f647cf96dbd18ff69c3db8c8ad7637452cf2f60561150685b32"}
Nov 25 12:39:30 crc kubenswrapper[4675]: I1125 12:39:30.857732 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2" event={"ID":"fff162cc-a7a0-4cbb-930f-7867fbb1cf70","Type":"ContainerStarted","Data":"3d49ff49e4eb1aca5fb5aa882326856708c4f15f3045499eba816b23184bc906"}
Nov 25 12:39:31 crc kubenswrapper[4675]: I1125 12:39:31.864611 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6d56f8f4ff-7qprj" event={"ID":"0b86e6d4-6107-4f2f-8731-cbfc74c885ac","Type":"ContainerStarted","Data":"0bff787123c104a73a05c97bf267e3e00c66be79c6798fe6bb319b27f0e327a4"}
Nov 25 12:39:31 crc kubenswrapper[4675]: I1125 12:39:31.896002 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6d56f8f4ff-7qprj" podStartSLOduration=1.895983473 podStartE2EDuration="1.895983473s" podCreationTimestamp="2025-11-25 12:39:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:39:31.889425713 +0000 UTC m=+717.061018064" watchObservedRunningTime="2025-11-25 12:39:31.895983473 +0000 UTC m=+717.067575814"
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.901658 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5" event={"ID":"c147bbe0-eb8e-44ff-b4e2-271af218f1ff","Type":"ContainerStarted","Data":"07b3886df6cc4caf41b7a76e10c3180c72d362b3e099aae57adf7f29a00c9af1"}
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.903790 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xzw2f" event={"ID":"83ed5cc3-8ce1-4765-b975-c7a543434c95","Type":"ContainerStarted","Data":"ddc68cabbf85e97701a8a4b13c78cbe99be99b082978317d26454426d97c9d23"}
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.903876 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.905320 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2" event={"ID":"fff162cc-a7a0-4cbb-930f-7867fbb1cf70","Type":"ContainerStarted","Data":"14350cfca311bf1f5f22a1ba5454e48f5569e2e8895a3e90168528c6a5cf4637"}
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.905385 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.906727 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx" event={"ID":"48952822-4273-4558-b07e-ad6e9e80dbdf","Type":"ContainerStarted","Data":"18de5368ae87bb5659de334864aab024794478f0f9fc33365dd60a20782c96c0"}
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.951077 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2" podStartSLOduration=2.744751982 podStartE2EDuration="9.951054653s" podCreationTimestamp="2025-11-25 12:39:29 +0000 UTC" firstStartedPulling="2025-11-25 12:39:30.491662645 +0000 UTC m=+715.663254987" lastFinishedPulling="2025-11-25 12:39:37.697965277 +0000 UTC m=+722.869557658" observedRunningTime="2025-11-25 12:39:38.946979558 +0000 UTC m=+724.118571919" watchObservedRunningTime="2025-11-25 12:39:38.951054653 +0000 UTC m=+724.122647004"
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.951423 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2l8n5" podStartSLOduration=2.982912535 podStartE2EDuration="9.951417574s" podCreationTimestamp="2025-11-25 12:39:29 +0000 UTC" firstStartedPulling="2025-11-25 12:39:30.735006658 +0000 UTC m=+715.906598999" lastFinishedPulling="2025-11-25 12:39:37.703511666 +0000 UTC m=+722.875104038" observedRunningTime="2025-11-25 12:39:38.919159039 +0000 UTC m=+724.090751380" watchObservedRunningTime="2025-11-25 12:39:38.951417574 +0000 UTC m=+724.123009915"
Nov 25 12:39:38 crc kubenswrapper[4675]: I1125 12:39:38.968341 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-xzw2f" podStartSLOduration=2.838052296 podStartE2EDuration="9.968325519s" podCreationTimestamp="2025-11-25 12:39:29 +0000 UTC" firstStartedPulling="2025-11-25 12:39:30.502996121 +0000 UTC m=+715.674588462" lastFinishedPulling="2025-11-25 12:39:37.633269344 +0000 UTC m=+722.804861685" observedRunningTime="2025-11-25 12:39:38.966305507 +0000 UTC m=+724.137897858" watchObservedRunningTime="2025-11-25 12:39:38.968325519 +0000 UTC m=+724.139917860"
Nov 25 12:39:40 crc kubenswrapper[4675]: I1125 12:39:40.544858 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:40 crc kubenswrapper[4675]: I1125 12:39:40.545841 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:40 crc kubenswrapper[4675]: I1125 12:39:40.552133 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:40 crc kubenswrapper[4675]: I1125 12:39:40.920545 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6d56f8f4ff-7qprj"
Nov 25 12:39:40 crc kubenswrapper[4675]: I1125 12:39:40.975165 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"]
Nov 25 12:39:42 crc kubenswrapper[4675]: I1125 12:39:42.932177 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx" event={"ID":"48952822-4273-4558-b07e-ad6e9e80dbdf","Type":"ContainerStarted","Data":"60971ac29e8105d07cbf5f719f8b512081be93f8a03ac849b8aaffa0275db4d9"}
Nov 25 12:39:42 crc kubenswrapper[4675]: I1125 12:39:42.959261 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-gwgnx" podStartSLOduration=2.704783001 podStartE2EDuration="13.959235559s" podCreationTimestamp="2025-11-25 12:39:29 +0000 UTC" firstStartedPulling="2025-11-25 12:39:30.583991402 +0000 UTC m=+715.755583743" lastFinishedPulling="2025-11-25 12:39:41.83844396 +0000 UTC m=+727.010036301" observedRunningTime="2025-11-25 12:39:42.94943351 +0000 UTC m=+728.121025901" watchObservedRunningTime="2025-11-25 12:39:42.959235559 +0000 UTC m=+728.130827930"
Nov 25 12:39:43 crc kubenswrapper[4675]: I1125 12:39:43.662279 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:39:43 crc kubenswrapper[4675]: I1125 12:39:43.662647 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:39:45 crc kubenswrapper[4675]: I1125 12:39:45.446583 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-xzw2f"
Nov 25 12:39:50 crc kubenswrapper[4675]: I1125 12:39:50.128685 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-z8hd2"
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.028171 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-r2pgw" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerName="console" containerID="cri-o://536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495" gracePeriod=15
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.367146 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-r2pgw_74086034-016c-4df6-bd1e-c4f99eb3edbe/console/0.log"
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.367475 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-r2pgw"
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538109 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538199 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538235 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzv7s\" (UniqueName: \"kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538258 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538282 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538417 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.538481 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert\") pod \"74086034-016c-4df6-bd1e-c4f99eb3edbe\" (UID: \"74086034-016c-4df6-bd1e-c4f99eb3edbe\") "
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.542259 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.542686 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config" (OuterVolumeSpecName: "console-config") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.567963 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.568038 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca" (OuterVolumeSpecName: "service-ca") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.596372 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s" (OuterVolumeSpecName: "kube-api-access-fzv7s") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "kube-api-access-fzv7s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.601619 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.601797 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "74086034-016c-4df6-bd1e-c4f99eb3edbe" (UID: "74086034-016c-4df6-bd1e-c4f99eb3edbe"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639695 4675 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639738 4675 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639778 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzv7s\" (UniqueName: \"kubernetes.io/projected/74086034-016c-4df6-bd1e-c4f99eb3edbe-kube-api-access-fzv7s\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639794 4675 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639806 4675 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/74086034-016c-4df6-bd1e-c4f99eb3edbe-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639837 4675 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:06 crc kubenswrapper[4675]: I1125 12:40:06.639849 4675 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/74086034-016c-4df6-bd1e-c4f99eb3edbe-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063661 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-r2pgw_74086034-016c-4df6-bd1e-c4f99eb3edbe/console/0.log"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063717 4675 generic.go:334] "Generic (PLEG): container finished" podID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerID="536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495" exitCode=2
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063753 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-r2pgw" event={"ID":"74086034-016c-4df6-bd1e-c4f99eb3edbe","Type":"ContainerDied","Data":"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"}
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063791 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-r2pgw"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063805 4675 scope.go:117] "RemoveContainer" containerID="536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.063792 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-r2pgw" event={"ID":"74086034-016c-4df6-bd1e-c4f99eb3edbe","Type":"ContainerDied","Data":"ac77d76b2000499f6e8052b6fe3ca5fc67534bd2221c9418df2c1b238eae4886"}
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.076825 4675 scope.go:117] "RemoveContainer" containerID="536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"
Nov 25 12:40:07 crc kubenswrapper[4675]: E1125 12:40:07.077385 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495\": container with ID starting with 536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495 not found: ID does not exist" containerID="536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.077415 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495"} err="failed to get container status \"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495\": rpc error: code = NotFound desc = could not find container \"536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495\": container with ID starting with 536bab60be0d501a2f1fd010994943935fc54650e0ff35e53ed41d625eff2495 not found: ID does not exist"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.092368 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"]
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.095920 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-r2pgw"]
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.540407 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" path="/var/lib/kubelet/pods/74086034-016c-4df6-bd1e-c4f99eb3edbe/volumes"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.643628 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"]
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.643884 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager" containerID="cri-o://fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b" gracePeriod=30
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.741687 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"]
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.741935 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerName="route-controller-manager" containerID="cri-o://9e7480a36376e7b7b0798cc431b2d8e7b6afbf1a44352defb4d0f04c54720279" gracePeriod=30
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.900920 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"]
Nov 25 12:40:07 crc kubenswrapper[4675]: E1125 12:40:07.901213 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerName="console"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.901226 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerName="console"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.901328 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="74086034-016c-4df6-bd1e-c4f99eb3edbe" containerName="console"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.902271 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.904389 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 12:40:07 crc kubenswrapper[4675]: I1125 12:40:07.919518 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"]
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.039319 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.058670 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwwbn\" (UniqueName: \"kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.058753 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.058828 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.073339 4675 generic.go:334] "Generic (PLEG): container finished" podID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerID="9e7480a36376e7b7b0798cc431b2d8e7b6afbf1a44352defb4d0f04c54720279" exitCode=0
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.073459 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" event={"ID":"a4f386bf-2d54-4294-9a96-143e59a150ed","Type":"ContainerDied","Data":"9e7480a36376e7b7b0798cc431b2d8e7b6afbf1a44352defb4d0f04c54720279"}
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.076196 4675 generic.go:334] "Generic (PLEG): container finished" podID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerID="fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b" exitCode=0
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.076357 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.076301 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" event={"ID":"9dfcfcb3-6d81-425b-98ae-925d3fbf2369","Type":"ContainerDied","Data":"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"}
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.076460 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpfvq" event={"ID":"9dfcfcb3-6d81-425b-98ae-925d3fbf2369","Type":"ContainerDied","Data":"df75006f0b6d467b77b674b8ec32cabc98d3e70f0d4cceb03fa5888119886de4"}
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.076480 4675 scope.go:117] "RemoveContainer" containerID="fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.099195 4675 scope.go:117] "RemoveContainer" containerID="fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"
Nov 25 12:40:08 crc kubenswrapper[4675]: E1125 12:40:08.099618 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b\": container with ID starting with fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b not found: ID does not exist" containerID="fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.099645 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b"} err="failed to get container status \"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b\": rpc error: code = NotFound desc = could not find container \"fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b\": container with ID starting with fd71f4ce628b1de59f8e34bcb1c80d30ad609b042ff3d3b885b2c3dfed63c80b not found: ID does not exist"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.109787 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.159482 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") pod \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.159899 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") pod \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.159959 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") pod \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.159985 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert\") pod \"a4f386bf-2d54-4294-9a96-143e59a150ed\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160016 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") pod \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160043 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") pod \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\" (UID: \"9dfcfcb3-6d81-425b-98ae-925d3fbf2369\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160066 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xftds\" (UniqueName: \"kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds\") pod \"a4f386bf-2d54-4294-9a96-143e59a150ed\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160094 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca\") pod \"a4f386bf-2d54-4294-9a96-143e59a150ed\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160128 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config\") pod \"a4f386bf-2d54-4294-9a96-143e59a150ed\" (UID: \"a4f386bf-2d54-4294-9a96-143e59a150ed\") "
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160295 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160338 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwwbn\" (UniqueName: \"kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160399 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160625 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config" (OuterVolumeSpecName: "config") pod "9dfcfcb3-6d81-425b-98ae-925d3fbf2369" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160931 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.160978 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9dfcfcb3-6d81-425b-98ae-925d3fbf2369" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.161055 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config" (OuterVolumeSpecName: "config") pod "a4f386bf-2d54-4294-9a96-143e59a150ed" (UID: "a4f386bf-2d54-4294-9a96-143e59a150ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.161290 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca" (OuterVolumeSpecName: "client-ca") pod "a4f386bf-2d54-4294-9a96-143e59a150ed" (UID: "a4f386bf-2d54-4294-9a96-143e59a150ed"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.161317 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.162781 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca" (OuterVolumeSpecName: "client-ca") pod "9dfcfcb3-6d81-425b-98ae-925d3fbf2369" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.165256 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9dfcfcb3-6d81-425b-98ae-925d3fbf2369" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.166969 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5" (OuterVolumeSpecName: "kube-api-access-7jsz5") pod "9dfcfcb3-6d81-425b-98ae-925d3fbf2369" (UID: "9dfcfcb3-6d81-425b-98ae-925d3fbf2369"). InnerVolumeSpecName "kube-api-access-7jsz5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.174770 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds" (OuterVolumeSpecName: "kube-api-access-xftds") pod "a4f386bf-2d54-4294-9a96-143e59a150ed" (UID: "a4f386bf-2d54-4294-9a96-143e59a150ed"). InnerVolumeSpecName "kube-api-access-xftds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.179345 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a4f386bf-2d54-4294-9a96-143e59a150ed" (UID: "a4f386bf-2d54-4294-9a96-143e59a150ed"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.182596 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwwbn\" (UniqueName: \"kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.230979 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260810 4675 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260848 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f386bf-2d54-4294-9a96-143e59a150ed-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260857 4675 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260866 4675 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260877 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xftds\" (UniqueName: \"kubernetes.io/projected/a4f386bf-2d54-4294-9a96-143e59a150ed-kube-api-access-xftds\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260887 4675 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260895 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4f386bf-2d54-4294-9a96-143e59a150ed-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260902 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jsz5\" (UniqueName: \"kubernetes.io/projected/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-kube-api-access-7jsz5\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.260910 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dfcfcb3-6d81-425b-98ae-925d3fbf2369-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.410313 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"]
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.412955 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpfvq"]
Nov 25 12:40:08 crc kubenswrapper[4675]: I1125 12:40:08.608016 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz"]
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.050373 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w"]
Nov 25 12:40:09 crc kubenswrapper[4675]: E1125 12:40:09.050631 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.050649 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: E1125 12:40:09.050664 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerName="route-controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.050672 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerName="route-controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.050782 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" containerName="route-controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.050804 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" containerName="controller-manager"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.051285 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.065541 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w"]
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.096736 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.096736 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs" event={"ID":"a4f386bf-2d54-4294-9a96-143e59a150ed","Type":"ContainerDied","Data":"fc57b4bd59d926944caabd2c45da75340d753281c766391afe628bae7c05ca58"}
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.097079 4675 scope.go:117] "RemoveContainer" containerID="9e7480a36376e7b7b0798cc431b2d8e7b6afbf1a44352defb4d0f04c54720279"
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.099978 4675 generic.go:334] "Generic (PLEG): container finished" podID="077791d3-2406-48df-96ea-e6c84fa68b89" containerID="739bdc10bd1d54adc50c830aac29b1282ef872bada0f27a8447e6e2ad3529f04" exitCode=0
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.100006 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" event={"ID":"077791d3-2406-48df-96ea-e6c84fa68b89","Type":"ContainerDied","Data":"739bdc10bd1d54adc50c830aac29b1282ef872bada0f27a8447e6e2ad3529f04"}
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.100025 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" event={"ID":"077791d3-2406-48df-96ea-e6c84fa68b89","Type":"ContainerStarted","Data":"23d6e109016ec0710c94f2bd0e4b1969bbaceaf879ec874ab7aeed77b46f7eef"}
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.130229 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"]
Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.133995 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mpxxs"]
Nov 25 12:40:09 crc
kubenswrapper[4675]: I1125 12:40:09.172243 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnpl9\" (UniqueName: \"kubernetes.io/projected/a48f3cec-8a70-4672-93f0-9af956fccde8-kube-api-access-cnpl9\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.172281 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-config\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.172510 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-client-ca\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.172551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a48f3cec-8a70-4672-93f0-9af956fccde8-serving-cert\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.273630 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-client-ca\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.273708 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a48f3cec-8a70-4672-93f0-9af956fccde8-serving-cert\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.273749 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnpl9\" (UniqueName: \"kubernetes.io/projected/a48f3cec-8a70-4672-93f0-9af956fccde8-kube-api-access-cnpl9\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.273773 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-config\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc 
kubenswrapper[4675]: I1125 12:40:09.275447 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-client-ca\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.275618 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a48f3cec-8a70-4672-93f0-9af956fccde8-config\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.294255 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a48f3cec-8a70-4672-93f0-9af956fccde8-serving-cert\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.305888 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnpl9\" (UniqueName: \"kubernetes.io/projected/a48f3cec-8a70-4672-93f0-9af956fccde8-kube-api-access-cnpl9\") pod \"route-controller-manager-5644fc89c-ckd6w\" (UID: \"a48f3cec-8a70-4672-93f0-9af956fccde8\") " pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.308573 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-77b8844fb6-757qm"] Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.313484 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.318069 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77b8844fb6-757qm"] Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.321167 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.322467 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.322771 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.323115 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.323476 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.325214 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.332329 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.378605 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.477066 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-config\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.477111 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-client-ca\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.477156 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-proxy-ca-bundles\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.477192 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-serving-cert\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 
crc kubenswrapper[4675]: I1125 12:40:09.477243 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g9sp\" (UniqueName: \"kubernetes.io/projected/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-kube-api-access-7g9sp\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.538142 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dfcfcb3-6d81-425b-98ae-925d3fbf2369" path="/var/lib/kubelet/pods/9dfcfcb3-6d81-425b-98ae-925d3fbf2369/volumes" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.538795 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4f386bf-2d54-4294-9a96-143e59a150ed" path="/var/lib/kubelet/pods/a4f386bf-2d54-4294-9a96-143e59a150ed/volumes" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.571552 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w"] Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.578078 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-serving-cert\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.578200 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g9sp\" (UniqueName: \"kubernetes.io/projected/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-kube-api-access-7g9sp\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.578584 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-config\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.578610 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-client-ca\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.578653 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-proxy-ca-bundles\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.579692 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-client-ca\") pod \"controller-manager-77b8844fb6-757qm\" (UID: 
\"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.580047 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-proxy-ca-bundles\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.580338 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-config\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.583021 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-serving-cert\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.593712 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g9sp\" (UniqueName: \"kubernetes.io/projected/e09bd3ea-6adc-4088-a8e2-e768f12c15ef-kube-api-access-7g9sp\") pod \"controller-manager-77b8844fb6-757qm\" (UID: \"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\") " pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.655783 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:09 crc kubenswrapper[4675]: I1125 12:40:09.848837 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77b8844fb6-757qm"] Nov 25 12:40:09 crc kubenswrapper[4675]: W1125 12:40:09.855844 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode09bd3ea_6adc_4088_a8e2_e768f12c15ef.slice/crio-c18d03cf923493eba9e62d81dd79bd6ccf19a8841a3f61dcb36591a40c2a0d12 WatchSource:0}: Error finding container c18d03cf923493eba9e62d81dd79bd6ccf19a8841a3f61dcb36591a40c2a0d12: Status 404 returned error can't find the container with id c18d03cf923493eba9e62d81dd79bd6ccf19a8841a3f61dcb36591a40c2a0d12 Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.107951 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" event={"ID":"a48f3cec-8a70-4672-93f0-9af956fccde8","Type":"ContainerStarted","Data":"0b6e7b3f779212c8276b8d0f021feced4fbfc2976e387d54e15d184dafa308f0"} Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.108300 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" event={"ID":"a48f3cec-8a70-4672-93f0-9af956fccde8","Type":"ContainerStarted","Data":"7a24be6c701f219cc399145b939c9066f6cc81cc1d7c0d73bb700e3625412490"} Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.108325 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.111180 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" event={"ID":"e09bd3ea-6adc-4088-a8e2-e768f12c15ef","Type":"ContainerStarted","Data":"70793513c627ada75e2f0bd5632d6705c0d9e4bdee28a13b996e7c77d0e1ae18"} Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.111223 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" event={"ID":"e09bd3ea-6adc-4088-a8e2-e768f12c15ef","Type":"ContainerStarted","Data":"c18d03cf923493eba9e62d81dd79bd6ccf19a8841a3f61dcb36591a40c2a0d12"} Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.112023 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.128439 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.131789 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" podStartSLOduration=1.131768818 podStartE2EDuration="1.131768818s" podCreationTimestamp="2025-11-25 12:40:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:40:10.128704214 +0000 UTC m=+755.300296575" watchObservedRunningTime="2025-11-25 12:40:10.131768818 +0000 UTC m=+755.303361159" Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.327546 4675 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5644fc89c-ckd6w" Nov 25 12:40:10 crc kubenswrapper[4675]: I1125 12:40:10.350065 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-77b8844fb6-757qm" podStartSLOduration=3.350045656 podStartE2EDuration="3.350045656s" podCreationTimestamp="2025-11-25 12:40:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:40:10.151475329 +0000 UTC m=+755.323067660" watchObservedRunningTime="2025-11-25 12:40:10.350045656 +0000 UTC m=+755.521637997" Nov 25 12:40:11 crc kubenswrapper[4675]: I1125 12:40:11.143657 4675 generic.go:334] "Generic (PLEG): container finished" podID="077791d3-2406-48df-96ea-e6c84fa68b89" containerID="af05020545851308bb8082685dbe2a081453b52371ccc1cbc22165fda815ab94" exitCode=0 Nov 25 12:40:11 crc kubenswrapper[4675]: I1125 12:40:11.143771 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" event={"ID":"077791d3-2406-48df-96ea-e6c84fa68b89","Type":"ContainerDied","Data":"af05020545851308bb8082685dbe2a081453b52371ccc1cbc22165fda815ab94"} Nov 25 12:40:12 crc kubenswrapper[4675]: I1125 12:40:12.151192 4675 generic.go:334] "Generic (PLEG): container finished" podID="077791d3-2406-48df-96ea-e6c84fa68b89" containerID="5d925ac1f57a668a16fa0ef760e41490e51f045e2eccfbb59193cd2a664c3264" exitCode=0 Nov 25 12:40:12 crc kubenswrapper[4675]: I1125 12:40:12.151232 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" event={"ID":"077791d3-2406-48df-96ea-e6c84fa68b89","Type":"ContainerDied","Data":"5d925ac1f57a668a16fa0ef760e41490e51f045e2eccfbb59193cd2a664c3264"} Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.431524 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.626038 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle\") pod \"077791d3-2406-48df-96ea-e6c84fa68b89\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.626127 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util\") pod \"077791d3-2406-48df-96ea-e6c84fa68b89\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.626148 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwwbn\" (UniqueName: \"kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn\") pod \"077791d3-2406-48df-96ea-e6c84fa68b89\" (UID: \"077791d3-2406-48df-96ea-e6c84fa68b89\") " Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.627387 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle" (OuterVolumeSpecName: "bundle") pod "077791d3-2406-48df-96ea-e6c84fa68b89" (UID: "077791d3-2406-48df-96ea-e6c84fa68b89"). 
InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.634632 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn" (OuterVolumeSpecName: "kube-api-access-rwwbn") pod "077791d3-2406-48df-96ea-e6c84fa68b89" (UID: "077791d3-2406-48df-96ea-e6c84fa68b89"). InnerVolumeSpecName "kube-api-access-rwwbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.648968 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util" (OuterVolumeSpecName: "util") pod "077791d3-2406-48df-96ea-e6c84fa68b89" (UID: "077791d3-2406-48df-96ea-e6c84fa68b89"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.662199 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.662281 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.662341 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.663122 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.663212 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4" gracePeriod=600 Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.727386 4675 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.727665 4675 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/077791d3-2406-48df-96ea-e6c84fa68b89-util\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:13 crc kubenswrapper[4675]: I1125 12:40:13.727676 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwwbn\" (UniqueName: \"kubernetes.io/projected/077791d3-2406-48df-96ea-e6c84fa68b89-kube-api-access-rwwbn\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:14 crc 
kubenswrapper[4675]: I1125 12:40:14.164655 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4" exitCode=0 Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.164700 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4"} Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.165077 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f"} Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.165098 4675 scope.go:117] "RemoveContainer" containerID="e784210085ab24e660932292acaaa3ea622d553f34a1b01288fb02da752c810b" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.167621 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" event={"ID":"077791d3-2406-48df-96ea-e6c84fa68b89","Type":"ContainerDied","Data":"23d6e109016ec0710c94f2bd0e4b1969bbaceaf879ec874ab7aeed77b46f7eef"} Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.167676 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23d6e109016ec0710c94f2bd0e4b1969bbaceaf879ec874ab7aeed77b46f7eef" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.167763 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.932411 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"] Nov 25 12:40:14 crc kubenswrapper[4675]: E1125 12:40:14.932956 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="util" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.932968 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="util" Nov 25 12:40:14 crc kubenswrapper[4675]: E1125 12:40:14.932979 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="pull" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.932984 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="pull" Nov 25 12:40:14 crc kubenswrapper[4675]: E1125 12:40:14.932997 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="extract" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.933002 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="extract" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.933097 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="077791d3-2406-48df-96ea-e6c84fa68b89" containerName="extract" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.933797 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.943975 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsdk8\" (UniqueName: \"kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.944015 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.944057 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:14 crc kubenswrapper[4675]: I1125 12:40:14.948598 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"] Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.045023 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsdk8\" (UniqueName: \"kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.045253 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.045308 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.045707 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.045912 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.075681 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qsdk8\" (UniqueName: \"kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8\") pod \"redhat-operators-p4ltl\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.247867 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:15 crc kubenswrapper[4675]: I1125 12:40:15.747825 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"] Nov 25 12:40:15 crc kubenswrapper[4675]: W1125 12:40:15.755540 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19212445_24e3_4a10_a5d8_ef14210281c0.slice/crio-fa01c8474111d251cc95a60c3946c4c11141e060b27fbc65acb990e734431366 WatchSource:0}: Error finding container fa01c8474111d251cc95a60c3946c4c11141e060b27fbc65acb990e734431366: Status 404 returned error can't find the container with id fa01c8474111d251cc95a60c3946c4c11141e060b27fbc65acb990e734431366 Nov 25 12:40:16 crc kubenswrapper[4675]: I1125 12:40:16.181374 4675 generic.go:334] "Generic (PLEG): container finished" podID="19212445-24e3-4a10-a5d8-ef14210281c0" containerID="2ca204fb8b20b5e1ca0b70a2af29445ab4615f1b242a3a4196f7217cb30fcb35" exitCode=0 Nov 25 12:40:16 crc kubenswrapper[4675]: I1125 12:40:16.181418 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerDied","Data":"2ca204fb8b20b5e1ca0b70a2af29445ab4615f1b242a3a4196f7217cb30fcb35"} Nov 25 12:40:16 crc kubenswrapper[4675]: I1125 12:40:16.181447 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerStarted","Data":"fa01c8474111d251cc95a60c3946c4c11141e060b27fbc65acb990e734431366"} Nov 25 12:40:16 crc kubenswrapper[4675]: I1125 12:40:16.961568 4675 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 12:40:17 crc kubenswrapper[4675]: I1125 12:40:17.187480 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerStarted","Data":"81f31a450962391e5a0322f67d8f604586711ad2437425095eca8168a46a2593"} Nov 25 12:40:18 crc kubenswrapper[4675]: I1125 12:40:18.195177 4675 generic.go:334] "Generic (PLEG): container finished" podID="19212445-24e3-4a10-a5d8-ef14210281c0" containerID="81f31a450962391e5a0322f67d8f604586711ad2437425095eca8168a46a2593" exitCode=0 Nov 25 12:40:18 crc kubenswrapper[4675]: I1125 12:40:18.195276 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerDied","Data":"81f31a450962391e5a0322f67d8f604586711ad2437425095eca8168a46a2593"} Nov 25 12:40:19 crc kubenswrapper[4675]: I1125 12:40:19.203561 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerStarted","Data":"674e9b9462d0ac18364f8d30032d13b630a51d147586aee7095e8e25b7bcd4a2"} Nov 25 12:40:19 crc kubenswrapper[4675]: I1125 12:40:19.219553 4675 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p4ltl" podStartSLOduration=2.527709512 podStartE2EDuration="5.219535004s" podCreationTimestamp="2025-11-25 12:40:14 +0000 UTC" firstStartedPulling="2025-11-25 12:40:16.182492681 +0000 UTC m=+761.354085022" lastFinishedPulling="2025-11-25 12:40:18.874318173 +0000 UTC m=+764.045910514" observedRunningTime="2025-11-25 12:40:19.217638486 +0000 UTC m=+764.389230847" watchObservedRunningTime="2025-11-25 12:40:19.219535004 +0000 UTC m=+764.391127345" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.663902 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5db469f446-85gxv"] Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.665105 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.671703 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.671878 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-8qmg9" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.672017 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.672659 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.673651 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.693271 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5db469f446-85gxv"] Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.748269 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-webhook-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.748341 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-apiservice-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.748419 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb7mb\" (UniqueName: \"kubernetes.io/projected/eb100a90-931c-4daa-8466-49a1ae50185b-kube-api-access-zb7mb\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.849308 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-webhook-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.849352 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-apiservice-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.849425 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb7mb\" (UniqueName: \"kubernetes.io/projected/eb100a90-931c-4daa-8466-49a1ae50185b-kube-api-access-zb7mb\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.856753 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-webhook-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.863459 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eb100a90-931c-4daa-8466-49a1ae50185b-apiservice-cert\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.880102 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb7mb\" (UniqueName: \"kubernetes.io/projected/eb100a90-931c-4daa-8466-49a1ae50185b-kube-api-access-zb7mb\") pod \"metallb-operator-controller-manager-5db469f446-85gxv\" (UID: \"eb100a90-931c-4daa-8466-49a1ae50185b\") " pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.948128 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5745948454-t8f8s"] Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.948967 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.950982 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-ts4mt" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.952274 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.952282 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.976934 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5745948454-t8f8s"] Nov 25 12:40:22 crc kubenswrapper[4675]: I1125 12:40:22.981961 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.052119 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-webhook-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.052658 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdmm6\" (UniqueName: \"kubernetes.io/projected/fa47f959-6c90-4cbf-b9a2-1d1e152414da-kube-api-access-gdmm6\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.052785 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-apiservice-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.154219 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-apiservice-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.154306 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-webhook-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.154337 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdmm6\" (UniqueName: \"kubernetes.io/projected/fa47f959-6c90-4cbf-b9a2-1d1e152414da-kube-api-access-gdmm6\") pod 
\"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.160521 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-webhook-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.161593 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fa47f959-6c90-4cbf-b9a2-1d1e152414da-apiservice-cert\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.180706 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdmm6\" (UniqueName: \"kubernetes.io/projected/fa47f959-6c90-4cbf-b9a2-1d1e152414da-kube-api-access-gdmm6\") pod \"metallb-operator-webhook-server-5745948454-t8f8s\" (UID: \"fa47f959-6c90-4cbf-b9a2-1d1e152414da\") " pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.264921 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.552787 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5db469f446-85gxv"] Nov 25 12:40:23 crc kubenswrapper[4675]: W1125 12:40:23.563303 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb100a90_931c_4daa_8466_49a1ae50185b.slice/crio-844e3a6a520627422393a55a2f43b19e4fb917087655cac76edb00b9bd79135d WatchSource:0}: Error finding container 844e3a6a520627422393a55a2f43b19e4fb917087655cac76edb00b9bd79135d: Status 404 returned error can't find the container with id 844e3a6a520627422393a55a2f43b19e4fb917087655cac76edb00b9bd79135d Nov 25 12:40:23 crc kubenswrapper[4675]: I1125 12:40:23.919672 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5745948454-t8f8s"] Nov 25 12:40:23 crc kubenswrapper[4675]: W1125 12:40:23.944474 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa47f959_6c90_4cbf_b9a2_1d1e152414da.slice/crio-1d8c552170b97158a28b66694ced79741fc13efbdc0b21a4c29258ee172fc3ee WatchSource:0}: Error finding container 1d8c552170b97158a28b66694ced79741fc13efbdc0b21a4c29258ee172fc3ee: Status 404 returned error can't find the container with id 1d8c552170b97158a28b66694ced79741fc13efbdc0b21a4c29258ee172fc3ee Nov 25 12:40:24 crc kubenswrapper[4675]: I1125 12:40:24.247776 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerStarted","Data":"844e3a6a520627422393a55a2f43b19e4fb917087655cac76edb00b9bd79135d"} Nov 25 12:40:24 crc kubenswrapper[4675]: I1125 12:40:24.249519 4675 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" event={"ID":"fa47f959-6c90-4cbf-b9a2-1d1e152414da","Type":"ContainerStarted","Data":"1d8c552170b97158a28b66694ced79741fc13efbdc0b21a4c29258ee172fc3ee"} Nov 25 12:40:25 crc kubenswrapper[4675]: I1125 12:40:25.248428 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:25 crc kubenswrapper[4675]: I1125 12:40:25.249685 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:25 crc kubenswrapper[4675]: I1125 12:40:25.294506 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:26 crc kubenswrapper[4675]: I1125 12:40:26.326909 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.340718 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.342047 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.346736 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.439392 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.439474 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.439534 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2d8n\" (UniqueName: \"kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.541090 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.541125 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2d8n\" (UniqueName: \"kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") 
" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.541182 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.541604 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.541651 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.561541 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2d8n\" (UniqueName: \"kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n\") pod \"community-operators-msq9w\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:27 crc kubenswrapper[4675]: I1125 12:40:27.660320 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:28 crc kubenswrapper[4675]: I1125 12:40:28.724871 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"] Nov 25 12:40:28 crc kubenswrapper[4675]: I1125 12:40:28.725125 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p4ltl" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="registry-server" containerID="cri-o://674e9b9462d0ac18364f8d30032d13b630a51d147586aee7095e8e25b7bcd4a2" gracePeriod=2 Nov 25 12:40:29 crc kubenswrapper[4675]: I1125 12:40:29.304939 4675 generic.go:334] "Generic (PLEG): container finished" podID="19212445-24e3-4a10-a5d8-ef14210281c0" containerID="674e9b9462d0ac18364f8d30032d13b630a51d147586aee7095e8e25b7bcd4a2" exitCode=0 Nov 25 12:40:29 crc kubenswrapper[4675]: I1125 12:40:29.305011 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerDied","Data":"674e9b9462d0ac18364f8d30032d13b630a51d147586aee7095e8e25b7bcd4a2"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.020143 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.102335 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.197384 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content\") pod \"19212445-24e3-4a10-a5d8-ef14210281c0\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.197502 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities\") pod \"19212445-24e3-4a10-a5d8-ef14210281c0\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.198804 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities" (OuterVolumeSpecName: "utilities") pod "19212445-24e3-4a10-a5d8-ef14210281c0" (UID: "19212445-24e3-4a10-a5d8-ef14210281c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.198972 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsdk8\" (UniqueName: \"kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8\") pod \"19212445-24e3-4a10-a5d8-ef14210281c0\" (UID: \"19212445-24e3-4a10-a5d8-ef14210281c0\") " Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.200303 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.204853 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8" (OuterVolumeSpecName: "kube-api-access-qsdk8") pod "19212445-24e3-4a10-a5d8-ef14210281c0" (UID: "19212445-24e3-4a10-a5d8-ef14210281c0"). InnerVolumeSpecName "kube-api-access-qsdk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.290214 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19212445-24e3-4a10-a5d8-ef14210281c0" (UID: "19212445-24e3-4a10-a5d8-ef14210281c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.302500 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsdk8\" (UniqueName: \"kubernetes.io/projected/19212445-24e3-4a10-a5d8-ef14210281c0-kube-api-access-qsdk8\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.302549 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19212445-24e3-4a10-a5d8-ef14210281c0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.316520 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p4ltl" event={"ID":"19212445-24e3-4a10-a5d8-ef14210281c0","Type":"ContainerDied","Data":"fa01c8474111d251cc95a60c3946c4c11141e060b27fbc65acb990e734431366"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.316575 4675 scope.go:117] "RemoveContainer" containerID="674e9b9462d0ac18364f8d30032d13b630a51d147586aee7095e8e25b7bcd4a2" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.316797 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p4ltl" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.320218 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerStarted","Data":"de89a8ed7880b57083e71241b537176b458e508d2823635acc38832d974e99fb"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.320534 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.325173 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" event={"ID":"fa47f959-6c90-4cbf-b9a2-1d1e152414da","Type":"ContainerStarted","Data":"89e2f6ecf37e2b130b83bff3c5ab18054068e38c29a3b12df3f2c79771d52a9a"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.325259 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.327409 4675 generic.go:334] "Generic (PLEG): container finished" podID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerID="68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0" exitCode=0 Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.327454 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerDied","Data":"68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.327659 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerStarted","Data":"d02447406e12b5fb666085f73d9685358fcf7bde5a7b0df2024f78ec22ed223e"} Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.332731 4675 scope.go:117] "RemoveContainer" containerID="81f31a450962391e5a0322f67d8f604586711ad2437425095eca8168a46a2593" Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.349526 4675 
Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.369881 4675 scope.go:117] "RemoveContainer" containerID="2ca204fb8b20b5e1ca0b70a2af29445ab4615f1b242a3a4196f7217cb30fcb35"
Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.381252 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" podStartSLOduration=2.746684606 podStartE2EDuration="9.381231528s" podCreationTimestamp="2025-11-25 12:40:22 +0000 UTC" firstStartedPulling="2025-11-25 12:40:23.945723963 +0000 UTC m=+769.117316304" lastFinishedPulling="2025-11-25 12:40:30.580270885 +0000 UTC m=+775.751863226" observedRunningTime="2025-11-25 12:40:31.376362069 +0000 UTC m=+776.547954440" watchObservedRunningTime="2025-11-25 12:40:31.381231528 +0000 UTC m=+776.552823879"
Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.430870 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"]
Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.437270 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p4ltl"]
Nov 25 12:40:31 crc kubenswrapper[4675]: I1125 12:40:31.540900 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" path="/var/lib/kubelet/pods/19212445-24e3-4a10-a5d8-ef14210281c0/volumes"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.132473 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"]
Nov 25 12:40:32 crc kubenswrapper[4675]: E1125 12:40:32.134169 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="extract-content"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.134301 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="extract-content"
Nov 25 12:40:32 crc kubenswrapper[4675]: E1125 12:40:32.134371 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="registry-server"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.134434 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="registry-server"
Nov 25 12:40:32 crc kubenswrapper[4675]: E1125 12:40:32.134509 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="extract-utilities"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.134572 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="extract-utilities"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.134770 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="19212445-24e3-4a10-a5d8-ef14210281c0" containerName="registry-server"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.135856 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.149131 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"]
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.217793 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.218074 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwbf6\" (UniqueName: \"kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.218166 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.319612 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.319681 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwbf6\" (UniqueName: \"kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.319721 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.320261 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.320374 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr"
pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.334879 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerStarted","Data":"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a"} Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.350924 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwbf6\" (UniqueName: \"kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6\") pod \"redhat-marketplace-h5mqr\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.450649 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:32 crc kubenswrapper[4675]: I1125 12:40:32.931575 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"] Nov 25 12:40:32 crc kubenswrapper[4675]: W1125 12:40:32.940986 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod615f874d_ef65_4460_96bd_613bf7d43d74.slice/crio-002620c7158d4a7c200b9e6a948cd8e1d368bea78a9117e75d6024ac3e1a1c0c WatchSource:0}: Error finding container 002620c7158d4a7c200b9e6a948cd8e1d368bea78a9117e75d6024ac3e1a1c0c: Status 404 returned error can't find the container with id 002620c7158d4a7c200b9e6a948cd8e1d368bea78a9117e75d6024ac3e1a1c0c Nov 25 12:40:33 crc kubenswrapper[4675]: I1125 12:40:33.344091 4675 generic.go:334] "Generic (PLEG): container finished" podID="615f874d-ef65-4460-96bd-613bf7d43d74" containerID="665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa" exitCode=0 Nov 25 12:40:33 crc kubenswrapper[4675]: I1125 12:40:33.344167 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerDied","Data":"665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa"} Nov 25 12:40:33 crc kubenswrapper[4675]: I1125 12:40:33.344197 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerStarted","Data":"002620c7158d4a7c200b9e6a948cd8e1d368bea78a9117e75d6024ac3e1a1c0c"} Nov 25 12:40:33 crc kubenswrapper[4675]: I1125 12:40:33.347382 4675 generic.go:334] "Generic (PLEG): container finished" podID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerID="199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a" exitCode=0 Nov 25 12:40:33 crc kubenswrapper[4675]: I1125 12:40:33.347447 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerDied","Data":"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a"} Nov 25 12:40:34 crc kubenswrapper[4675]: I1125 12:40:34.356616 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerStarted","Data":"a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f"} Nov 25 12:40:35 crc kubenswrapper[4675]: I1125 12:40:35.366117 4675 
generic.go:334] "Generic (PLEG): container finished" podID="615f874d-ef65-4460-96bd-613bf7d43d74" containerID="739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb" exitCode=0 Nov 25 12:40:35 crc kubenswrapper[4675]: I1125 12:40:35.366259 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerDied","Data":"739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb"} Nov 25 12:40:35 crc kubenswrapper[4675]: I1125 12:40:35.409865 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-msq9w" podStartSLOduration=5.578430487 podStartE2EDuration="8.409849128s" podCreationTimestamp="2025-11-25 12:40:27 +0000 UTC" firstStartedPulling="2025-11-25 12:40:31.332678467 +0000 UTC m=+776.504270808" lastFinishedPulling="2025-11-25 12:40:34.164097108 +0000 UTC m=+779.335689449" observedRunningTime="2025-11-25 12:40:35.409685973 +0000 UTC m=+780.581278324" watchObservedRunningTime="2025-11-25 12:40:35.409849128 +0000 UTC m=+780.581441469" Nov 25 12:40:37 crc kubenswrapper[4675]: I1125 12:40:37.379305 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerStarted","Data":"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309"} Nov 25 12:40:37 crc kubenswrapper[4675]: I1125 12:40:37.395637 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h5mqr" podStartSLOduration=2.184997394 podStartE2EDuration="5.395620682s" podCreationTimestamp="2025-11-25 12:40:32 +0000 UTC" firstStartedPulling="2025-11-25 12:40:33.345723543 +0000 UTC m=+778.517315894" lastFinishedPulling="2025-11-25 12:40:36.556346841 +0000 UTC m=+781.727939182" observedRunningTime="2025-11-25 12:40:37.394240131 +0000 UTC m=+782.565832472" watchObservedRunningTime="2025-11-25 12:40:37.395620682 +0000 UTC m=+782.567213023" Nov 25 12:40:37 crc kubenswrapper[4675]: I1125 12:40:37.660890 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:37 crc kubenswrapper[4675]: I1125 12:40:37.660950 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:37 crc kubenswrapper[4675]: I1125 12:40:37.707419 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:42 crc kubenswrapper[4675]: I1125 12:40:42.452033 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:42 crc kubenswrapper[4675]: I1125 12:40:42.452333 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:42 crc kubenswrapper[4675]: I1125 12:40:42.496398 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:43 crc kubenswrapper[4675]: I1125 12:40:43.295889 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5745948454-t8f8s" Nov 25 12:40:43 crc kubenswrapper[4675]: I1125 12:40:43.497429 4675 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:45 crc kubenswrapper[4675]: I1125 12:40:45.126962 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"] Nov 25 12:40:45 crc kubenswrapper[4675]: I1125 12:40:45.455353 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h5mqr" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="registry-server" containerID="cri-o://3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309" gracePeriod=2 Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.459698 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.461675 4675 generic.go:334] "Generic (PLEG): container finished" podID="615f874d-ef65-4460-96bd-613bf7d43d74" containerID="3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309" exitCode=0 Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.461717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerDied","Data":"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309"} Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.461744 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h5mqr" event={"ID":"615f874d-ef65-4460-96bd-613bf7d43d74","Type":"ContainerDied","Data":"002620c7158d4a7c200b9e6a948cd8e1d368bea78a9117e75d6024ac3e1a1c0c"} Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.461767 4675 scope.go:117] "RemoveContainer" containerID="3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.461781 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h5mqr" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.478765 4675 scope.go:117] "RemoveContainer" containerID="739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.493658 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content\") pod \"615f874d-ef65-4460-96bd-613bf7d43d74\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.493724 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities\") pod \"615f874d-ef65-4460-96bd-613bf7d43d74\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.493743 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwbf6\" (UniqueName: \"kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6\") pod \"615f874d-ef65-4460-96bd-613bf7d43d74\" (UID: \"615f874d-ef65-4460-96bd-613bf7d43d74\") " Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.494648 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities" (OuterVolumeSpecName: "utilities") pod "615f874d-ef65-4460-96bd-613bf7d43d74" (UID: "615f874d-ef65-4460-96bd-613bf7d43d74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.497435 4675 scope.go:117] "RemoveContainer" containerID="665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.506952 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6" (OuterVolumeSpecName: "kube-api-access-bwbf6") pod "615f874d-ef65-4460-96bd-613bf7d43d74" (UID: "615f874d-ef65-4460-96bd-613bf7d43d74"). InnerVolumeSpecName "kube-api-access-bwbf6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.515917 4675 scope.go:117] "RemoveContainer" containerID="3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309" Nov 25 12:40:46 crc kubenswrapper[4675]: E1125 12:40:46.516596 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309\": container with ID starting with 3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309 not found: ID does not exist" containerID="3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.516641 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309"} err="failed to get container status \"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309\": rpc error: code = NotFound desc = could not find container \"3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309\": container with ID starting with 3fe191997ae8b8b7454543851473c96224c546176b414cbeec2c6874b23d2309 not found: ID does not exist" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.516666 4675 scope.go:117] "RemoveContainer" containerID="739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb" Nov 25 12:40:46 crc kubenswrapper[4675]: E1125 12:40:46.517283 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb\": container with ID starting with 739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb not found: ID does not exist" containerID="739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.517362 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb"} err="failed to get container status \"739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb\": rpc error: code = NotFound desc = could not find container \"739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb\": container with ID starting with 739e285630b8e85c6ee3e6ead4380298570aff0cc2744bb476e287c2f692e8bb not found: ID does not exist" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.517424 4675 scope.go:117] "RemoveContainer" containerID="665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa" Nov 25 12:40:46 crc kubenswrapper[4675]: E1125 12:40:46.518386 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa\": container with ID starting with 665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa not found: ID does not exist" containerID="665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.518407 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa"} err="failed to get container status \"665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa\": rpc error: code = NotFound desc = could not 
find container \"665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa\": container with ID starting with 665ab02f1260cee37f4cc703b4b632ed0b00be9765dd64f07647e05acbcebdaa not found: ID does not exist" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.530493 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "615f874d-ef65-4460-96bd-613bf7d43d74" (UID: "615f874d-ef65-4460-96bd-613bf7d43d74"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.594776 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.594807 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615f874d-ef65-4460-96bd-613bf7d43d74-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.594878 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwbf6\" (UniqueName: \"kubernetes.io/projected/615f874d-ef65-4460-96bd-613bf7d43d74-kube-api-access-bwbf6\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.785856 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"] Nov 25 12:40:46 crc kubenswrapper[4675]: I1125 12:40:46.797301 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h5mqr"] Nov 25 12:40:47 crc kubenswrapper[4675]: I1125 12:40:47.539527 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" path="/var/lib/kubelet/pods/615f874d-ef65-4460-96bd-613bf7d43d74/volumes" Nov 25 12:40:47 crc kubenswrapper[4675]: I1125 12:40:47.709452 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.526136 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.526675 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-msq9w" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="registry-server" containerID="cri-o://a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f" gracePeriod=2 Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.910604 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.954610 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content\") pod \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.954673 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2d8n\" (UniqueName: \"kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n\") pod \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.954709 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities\") pod \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\" (UID: \"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4\") " Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.955840 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities" (OuterVolumeSpecName: "utilities") pod "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" (UID: "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:51 crc kubenswrapper[4675]: I1125 12:40:51.963026 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n" (OuterVolumeSpecName: "kube-api-access-s2d8n") pod "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" (UID: "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4"). InnerVolumeSpecName "kube-api-access-s2d8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.007120 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" (UID: "3e76b28c-cae7-4f1e-a0ed-87891fec6ed4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.055923 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.055966 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2d8n\" (UniqueName: \"kubernetes.io/projected/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-kube-api-access-s2d8n\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.055977 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.500260 4675 generic.go:334] "Generic (PLEG): container finished" podID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerID="a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f" exitCode=0 Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.500303 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerDied","Data":"a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f"} Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.500342 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-msq9w" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.500361 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-msq9w" event={"ID":"3e76b28c-cae7-4f1e-a0ed-87891fec6ed4","Type":"ContainerDied","Data":"d02447406e12b5fb666085f73d9685358fcf7bde5a7b0df2024f78ec22ed223e"} Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.500387 4675 scope.go:117] "RemoveContainer" containerID="a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.516070 4675 scope.go:117] "RemoveContainer" containerID="199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.529163 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.538574 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-msq9w"] Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.538704 4675 scope.go:117] "RemoveContainer" containerID="68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.566526 4675 scope.go:117] "RemoveContainer" containerID="a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f" Nov 25 12:40:52 crc kubenswrapper[4675]: E1125 12:40:52.567991 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f\": container with ID starting with a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f not found: ID does not exist" containerID="a975b9c0f5059545ebd675c93cbad6c1a054b9350b19e03e00a28180f977343f" Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.568039 
Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.568068 4675 scope.go:117] "RemoveContainer" containerID="199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a"
Nov 25 12:40:52 crc kubenswrapper[4675]: E1125 12:40:52.568361 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a\": container with ID starting with 199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a not found: ID does not exist" containerID="199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a"
Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.568454 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a"} err="failed to get container status \"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a\": rpc error: code = NotFound desc = could not find container \"199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a\": container with ID starting with 199b1fdf0e0335907d2d434c0c2df5f36fc1d1ed1405ba02023d81884c988c9a not found: ID does not exist"
Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.568534 4675 scope.go:117] "RemoveContainer" containerID="68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0"
Nov 25 12:40:52 crc kubenswrapper[4675]: E1125 12:40:52.568850 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0\": container with ID starting with 68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0 not found: ID does not exist" containerID="68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0"
Nov 25 12:40:52 crc kubenswrapper[4675]: I1125 12:40:52.568950 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0"} err="failed to get container status \"68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0\": rpc error: code = NotFound desc = could not find container \"68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0\": container with ID starting with 68bf022d457012f2638859d218a2c1353f5d6f4d9527f32cc53a4e2736b2a5d0 not found: ID does not exist"
Nov 25 12:40:53 crc kubenswrapper[4675]: I1125 12:40:53.540127 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" path="/var/lib/kubelet/pods/3e76b28c-cae7-4f1e-a0ed-87891fec6ed4/volumes"
Nov 25 12:41:02 crc kubenswrapper[4675]: I1125 12:41:02.983967 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.667524 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-45wff"]
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668042 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668122 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668188 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668241 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668297 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="extract-content"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668364 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="extract-content"
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668422 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="extract-utilities"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668481 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="extract-utilities"
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668541 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="extract-utilities"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668595 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="extract-utilities"
Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.668655 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="extract-content"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668708 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="extract-content"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668887 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="615f874d-ef65-4460-96bd-613bf7d43d74" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.668991 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e76b28c-cae7-4f1e-a0ed-87891fec6ed4" containerName="registry-server"
Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.670917 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-45wff"
Need to start a new one" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.674137 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.674451 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.679247 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx"] Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.680127 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.680555 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-s8rdk" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.682646 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.689301 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx"] Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690432 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2c42\" (UniqueName: \"kubernetes.io/projected/f2695e9e-a774-4ab3-823a-6ea088db6ae8-kube-api-access-z2c42\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690491 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-startup\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics-certs\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690662 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvn4j\" (UniqueName: \"kubernetes.io/projected/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-kube-api-access-cvn4j\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690714 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-reloader\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690747 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-sockets\") pod 
\"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690761 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690830 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.690862 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-conf\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.766135 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-jwm85"] Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.767077 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.769728 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.769752 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.770832 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-f9579" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.780293 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.788132 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-dnrkj"] Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.789511 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.791427 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792191 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792249 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfbcl\" (UniqueName: \"kubernetes.io/projected/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-kube-api-access-mfbcl\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792283 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2c42\" (UniqueName: \"kubernetes.io/projected/f2695e9e-a774-4ab3-823a-6ea088db6ae8-kube-api-access-z2c42\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792311 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-startup\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792339 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics-certs\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792360 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792383 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792425 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metallb-excludel2\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792447 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvn4j\" (UniqueName: 
\"kubernetes.io/projected/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-kube-api-access-cvn4j\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792476 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-reloader\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792507 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-sockets\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792527 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792552 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlvjh\" (UniqueName: \"kubernetes.io/projected/c7735c55-9453-4052-8156-30e1155c73eb-kube-api-access-nlvjh\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792575 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-cert\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792598 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.792622 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-conf\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.793078 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-conf\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.793288 4675 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.793343 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert 
podName:f2695e9e-a774-4ab3-823a-6ea088db6ae8 nodeName:}" failed. No retries permitted until 2025-11-25 12:41:04.293323656 +0000 UTC m=+809.464916097 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert") pod "frr-k8s-webhook-server-6998585d5-2vqjx" (UID: "f2695e9e-a774-4ab3-823a-6ea088db6ae8") : secret "frr-k8s-webhook-server-cert" not found Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.793512 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.793754 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-sockets\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.794080 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-reloader\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.794394 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-frr-startup\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.800548 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-metrics-certs\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.807943 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-dnrkj"] Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.822412 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvn4j\" (UniqueName: \"kubernetes.io/projected/afe1f56b-a6db-4458-bdb3-a5b6f88e30b0-kube-api-access-cvn4j\") pod \"frr-k8s-45wff\" (UID: \"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0\") " pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.843960 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2c42\" (UniqueName: \"kubernetes.io/projected/f2695e9e-a774-4ab3-823a-6ea088db6ae8-kube-api-access-z2c42\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893130 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metallb-excludel2\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: 
I1125 12:41:03.893240 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlvjh\" (UniqueName: \"kubernetes.io/projected/c7735c55-9453-4052-8156-30e1155c73eb-kube-api-access-nlvjh\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893260 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-cert\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893291 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893327 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfbcl\" (UniqueName: \"kubernetes.io/projected/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-kube-api-access-mfbcl\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893384 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893406 4675 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893460 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs podName:c7735c55-9453-4052-8156-30e1155c73eb nodeName:}" failed. No retries permitted until 2025-11-25 12:41:04.39344428 +0000 UTC m=+809.565036621 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs") pod "controller-6c7b4b5f48-dnrkj" (UID: "c7735c55-9453-4052-8156-30e1155c73eb") : secret "controller-certs-secret" not found Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893562 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893576 4675 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893608 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs podName:c2d490ab-73e3-4ff8-a9e1-1359fa135b87 nodeName:}" failed. 
No retries permitted until 2025-11-25 12:41:04.393597174 +0000 UTC m=+809.565189515 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs") pod "speaker-jwm85" (UID: "c2d490ab-73e3-4ff8-a9e1-1359fa135b87") : secret "speaker-certs-secret" not found Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893657 4675 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 12:41:03 crc kubenswrapper[4675]: E1125 12:41:03.893691 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist podName:c2d490ab-73e3-4ff8-a9e1-1359fa135b87 nodeName:}" failed. No retries permitted until 2025-11-25 12:41:04.393679147 +0000 UTC m=+809.565271478 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist") pod "speaker-jwm85" (UID: "c2d490ab-73e3-4ff8-a9e1-1359fa135b87") : secret "metallb-memberlist" not found Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.893923 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metallb-excludel2\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.900651 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.907853 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-cert\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.917936 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfbcl\" (UniqueName: \"kubernetes.io/projected/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-kube-api-access-mfbcl\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.937956 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlvjh\" (UniqueName: \"kubernetes.io/projected/c7735c55-9453-4052-8156-30e1155c73eb-kube-api-access-nlvjh\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:03 crc kubenswrapper[4675]: I1125 12:41:03.991659 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.297650 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.301295 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f2695e9e-a774-4ab3-823a-6ea088db6ae8-cert\") pod \"frr-k8s-webhook-server-6998585d5-2vqjx\" (UID: \"f2695e9e-a774-4ab3-823a-6ea088db6ae8\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.399421 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.399491 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.399509 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:04 crc kubenswrapper[4675]: E1125 12:41:04.399626 4675 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 12:41:04 crc kubenswrapper[4675]: E1125 12:41:04.399673 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist podName:c2d490ab-73e3-4ff8-a9e1-1359fa135b87 nodeName:}" failed. No retries permitted until 2025-11-25 12:41:05.399658772 +0000 UTC m=+810.571251113 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist") pod "speaker-jwm85" (UID: "c2d490ab-73e3-4ff8-a9e1-1359fa135b87") : secret "metallb-memberlist" not found Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.402505 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-metrics-certs\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.402599 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7735c55-9453-4052-8156-30e1155c73eb-metrics-certs\") pod \"controller-6c7b4b5f48-dnrkj\" (UID: \"c7735c55-9453-4052-8156-30e1155c73eb\") " pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.423482 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.565534 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"b180c7f2bd9e1946f7c437a14cdda08cc21f8206e1c01976468fde189c1baf9e"} Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.599605 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.850495 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-dnrkj"] Nov 25 12:41:04 crc kubenswrapper[4675]: I1125 12:41:04.985259 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx"] Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.414312 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.423061 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c2d490ab-73e3-4ff8-a9e1-1359fa135b87-memberlist\") pod \"speaker-jwm85\" (UID: \"c2d490ab-73e3-4ff8-a9e1-1359fa135b87\") " pod="metallb-system/speaker-jwm85" Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.573352 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dnrkj" event={"ID":"c7735c55-9453-4052-8156-30e1155c73eb","Type":"ContainerStarted","Data":"15a823df42791535c10c8113e6b2d219aeae9d2c34f56c430fc202a3520ddd46"} Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.573391 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dnrkj" event={"ID":"c7735c55-9453-4052-8156-30e1155c73eb","Type":"ContainerStarted","Data":"8fca67bce84796d278cfc3471b4b875ad0f4e872cb17a21e85b9d2d9ad39ea4c"} Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.573401 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dnrkj" 
event={"ID":"c7735c55-9453-4052-8156-30e1155c73eb","Type":"ContainerStarted","Data":"b2f1b9663c4baa960b5375983ce1199b5eb938ad282b31925c94e947fc6b7f8f"} Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.573502 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.576733 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" event={"ID":"f2695e9e-a774-4ab3-823a-6ea088db6ae8","Type":"ContainerStarted","Data":"2adc65261310952c1ef5b609b896e5403d3c228dcfe3f444ef00bfc0742e82a8"} Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.579852 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-jwm85" Nov 25 12:41:05 crc kubenswrapper[4675]: I1125 12:41:05.601378 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-dnrkj" podStartSLOduration=2.601359869 podStartE2EDuration="2.601359869s" podCreationTimestamp="2025-11-25 12:41:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:41:05.599556044 +0000 UTC m=+810.771148395" watchObservedRunningTime="2025-11-25 12:41:05.601359869 +0000 UTC m=+810.772952210" Nov 25 12:41:05 crc kubenswrapper[4675]: W1125 12:41:05.608869 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2d490ab_73e3_4ff8_a9e1_1359fa135b87.slice/crio-9d3f0d706896c06310d4ea18226ff097b2183c12186191cdc3a90d710ed51dd6 WatchSource:0}: Error finding container 9d3f0d706896c06310d4ea18226ff097b2183c12186191cdc3a90d710ed51dd6: Status 404 returned error can't find the container with id 9d3f0d706896c06310d4ea18226ff097b2183c12186191cdc3a90d710ed51dd6 Nov 25 12:41:06 crc kubenswrapper[4675]: I1125 12:41:06.590453 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jwm85" event={"ID":"c2d490ab-73e3-4ff8-a9e1-1359fa135b87","Type":"ContainerStarted","Data":"21f96d96d483c57ec0c64086779f172d4860044b3b0bb3dddf781beb7f16996b"} Nov 25 12:41:06 crc kubenswrapper[4675]: I1125 12:41:06.590793 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jwm85" event={"ID":"c2d490ab-73e3-4ff8-a9e1-1359fa135b87","Type":"ContainerStarted","Data":"e2cca3dbeab8368ccc19e1cdf72ced088790bcd06fa0846ca9b619c4ac6a2849"} Nov 25 12:41:06 crc kubenswrapper[4675]: I1125 12:41:06.590809 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jwm85" event={"ID":"c2d490ab-73e3-4ff8-a9e1-1359fa135b87","Type":"ContainerStarted","Data":"9d3f0d706896c06310d4ea18226ff097b2183c12186191cdc3a90d710ed51dd6"} Nov 25 12:41:06 crc kubenswrapper[4675]: I1125 12:41:06.591510 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-jwm85" Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.636595 4675 generic.go:334] "Generic (PLEG): container finished" podID="afe1f56b-a6db-4458-bdb3-a5b6f88e30b0" containerID="68894b12fa78aa8926a8520b92e339b257e5da6f69cefc63ae73edae439b7981" exitCode=0 Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.637307 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" 
event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerDied","Data":"68894b12fa78aa8926a8520b92e339b257e5da6f69cefc63ae73edae439b7981"} Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.640158 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" event={"ID":"f2695e9e-a774-4ab3-823a-6ea088db6ae8","Type":"ContainerStarted","Data":"2d455a21c2c126650b75b0c6890b8908e4791e509cbad6a2458e23316c460276"} Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.640583 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.698659 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-jwm85" podStartSLOduration=9.698643903 podStartE2EDuration="9.698643903s" podCreationTimestamp="2025-11-25 12:41:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:41:06.643570375 +0000 UTC m=+811.815162746" watchObservedRunningTime="2025-11-25 12:41:12.698643903 +0000 UTC m=+817.870236244" Nov 25 12:41:12 crc kubenswrapper[4675]: I1125 12:41:12.711191 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" podStartSLOduration=2.988165448 podStartE2EDuration="9.711177959s" podCreationTimestamp="2025-11-25 12:41:03 +0000 UTC" firstStartedPulling="2025-11-25 12:41:04.995843728 +0000 UTC m=+810.167436069" lastFinishedPulling="2025-11-25 12:41:11.718856239 +0000 UTC m=+816.890448580" observedRunningTime="2025-11-25 12:41:12.708804733 +0000 UTC m=+817.880397074" watchObservedRunningTime="2025-11-25 12:41:12.711177959 +0000 UTC m=+817.882770300" Nov 25 12:41:13 crc kubenswrapper[4675]: I1125 12:41:13.647151 4675 generic.go:334] "Generic (PLEG): container finished" podID="afe1f56b-a6db-4458-bdb3-a5b6f88e30b0" containerID="a0ecbb4bb7fb4631241e03faaa1ebf87ec7f56e5ddb872aab50377eb3d9de14a" exitCode=0 Nov 25 12:41:13 crc kubenswrapper[4675]: I1125 12:41:13.647245 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerDied","Data":"a0ecbb4bb7fb4631241e03faaa1ebf87ec7f56e5ddb872aab50377eb3d9de14a"} Nov 25 12:41:14 crc kubenswrapper[4675]: I1125 12:41:14.428146 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-dnrkj" Nov 25 12:41:14 crc kubenswrapper[4675]: I1125 12:41:14.654497 4675 generic.go:334] "Generic (PLEG): container finished" podID="afe1f56b-a6db-4458-bdb3-a5b6f88e30b0" containerID="ab6033c497501b5858a8a14675fc052073d916d627abd3a1778b8e559332ed6a" exitCode=0 Nov 25 12:41:14 crc kubenswrapper[4675]: I1125 12:41:14.654590 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerDied","Data":"ab6033c497501b5858a8a14675fc052073d916d627abd3a1778b8e559332ed6a"} Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.583830 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-jwm85" Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.665752 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" 
event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"92d94d213512d392c8ec345c77e82174533f98e428c9d368c96a46c3d178e04e"} Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.665800 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"37a6bb0da4634dfa5df231fb9981263ef41f51bede14228cf7e2addef594bb8d"} Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.665828 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"eb619700765add7eb88b59d623db729e6d8834528d8d404a5964ee240cbdf74d"} Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.665840 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"8aa6efad781ce6522521c17d389c457fc604947fc470460fc9d57782934e8724"} Nov 25 12:41:15 crc kubenswrapper[4675]: I1125 12:41:15.665851 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"d4b19c949dd276f307cdac8b158b5d19d724e7865ab106cc2767ddf7d8598a6f"} Nov 25 12:41:16 crc kubenswrapper[4675]: I1125 12:41:16.674343 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-45wff" event={"ID":"afe1f56b-a6db-4458-bdb3-a5b6f88e30b0","Type":"ContainerStarted","Data":"2c86302dc1b4fdc081f147a8e125e35ff71b89d742508992d2cf973598a045ef"} Nov 25 12:41:16 crc kubenswrapper[4675]: I1125 12:41:16.674666 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:16 crc kubenswrapper[4675]: I1125 12:41:16.699764 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-45wff" podStartSLOduration=6.190711848 podStartE2EDuration="13.699743176s" podCreationTimestamp="2025-11-25 12:41:03 +0000 UTC" firstStartedPulling="2025-11-25 12:41:04.194108862 +0000 UTC m=+809.365701203" lastFinishedPulling="2025-11-25 12:41:11.7031402 +0000 UTC m=+816.874732531" observedRunningTime="2025-11-25 12:41:16.698475566 +0000 UTC m=+821.870067927" watchObservedRunningTime="2025-11-25 12:41:16.699743176 +0000 UTC m=+821.871335517" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.661503 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.663781 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.667034 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.667073 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-sfbl5" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.667358 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.734450 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.799775 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rqwf\" (UniqueName: \"kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf\") pod \"openstack-operator-index-vsfws\" (UID: \"d80f92a0-6bd1-4b91-b87e-888f9e929515\") " pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.901538 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rqwf\" (UniqueName: \"kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf\") pod \"openstack-operator-index-vsfws\" (UID: \"d80f92a0-6bd1-4b91-b87e-888f9e929515\") " pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.919579 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rqwf\" (UniqueName: \"kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf\") pod \"openstack-operator-index-vsfws\" (UID: \"d80f92a0-6bd1-4b91-b87e-888f9e929515\") " pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:18 crc kubenswrapper[4675]: I1125 12:41:18.993071 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:19 crc kubenswrapper[4675]: I1125 12:41:19.007482 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:19 crc kubenswrapper[4675]: I1125 12:41:19.045355 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:19 crc kubenswrapper[4675]: I1125 12:41:19.455855 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:19 crc kubenswrapper[4675]: I1125 12:41:19.700029 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vsfws" event={"ID":"d80f92a0-6bd1-4b91-b87e-888f9e929515","Type":"ContainerStarted","Data":"b743899a0a06ad9ef2dc1979b370f43e3f3eefb08b2475547006a8126d44eda0"} Nov 25 12:41:21 crc kubenswrapper[4675]: I1125 12:41:21.835389 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.459715 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-9qsg7"] Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.460474 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.463424 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9qsg7"] Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.658468 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z247x\" (UniqueName: \"kubernetes.io/projected/c3e36545-46f5-4907-84b0-93ed29882b8c-kube-api-access-z247x\") pod \"openstack-operator-index-9qsg7\" (UID: \"c3e36545-46f5-4907-84b0-93ed29882b8c\") " pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.764035 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z247x\" (UniqueName: \"kubernetes.io/projected/c3e36545-46f5-4907-84b0-93ed29882b8c-kube-api-access-z247x\") pod \"openstack-operator-index-9qsg7\" (UID: \"c3e36545-46f5-4907-84b0-93ed29882b8c\") " pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.784249 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z247x\" (UniqueName: \"kubernetes.io/projected/c3e36545-46f5-4907-84b0-93ed29882b8c-kube-api-access-z247x\") pod \"openstack-operator-index-9qsg7\" (UID: \"c3e36545-46f5-4907-84b0-93ed29882b8c\") " pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:22 crc kubenswrapper[4675]: I1125 12:41:22.788361 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.258149 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9qsg7"] Nov 25 12:41:23 crc kubenswrapper[4675]: W1125 12:41:23.261910 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3e36545_46f5_4907_84b0_93ed29882b8c.slice/crio-0ab06a72405f76206715b8c458e3e6c4f991cc6c381993aa55506ab6c5aa6512 WatchSource:0}: Error finding container 0ab06a72405f76206715b8c458e3e6c4f991cc6c381993aa55506ab6c5aa6512: Status 404 returned error can't find the container with id 0ab06a72405f76206715b8c458e3e6c4f991cc6c381993aa55506ab6c5aa6512 Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.722014 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vsfws" event={"ID":"d80f92a0-6bd1-4b91-b87e-888f9e929515","Type":"ContainerStarted","Data":"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b"} Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.722384 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-vsfws" podUID="d80f92a0-6bd1-4b91-b87e-888f9e929515" containerName="registry-server" containerID="cri-o://aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b" gracePeriod=2 Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.724099 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9qsg7" event={"ID":"c3e36545-46f5-4907-84b0-93ed29882b8c","Type":"ContainerStarted","Data":"3e13c5eedebe6bd409bd2861ff4297e8675b0a52191c81d9e41172c64cabde70"} Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.724159 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9qsg7" event={"ID":"c3e36545-46f5-4907-84b0-93ed29882b8c","Type":"ContainerStarted","Data":"0ab06a72405f76206715b8c458e3e6c4f991cc6c381993aa55506ab6c5aa6512"} Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.754195 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-9qsg7" podStartSLOduration=1.592629934 podStartE2EDuration="1.754168496s" podCreationTimestamp="2025-11-25 12:41:22 +0000 UTC" firstStartedPulling="2025-11-25 12:41:23.266574373 +0000 UTC m=+828.438166714" lastFinishedPulling="2025-11-25 12:41:23.428112935 +0000 UTC m=+828.599705276" observedRunningTime="2025-11-25 12:41:23.751391906 +0000 UTC m=+828.922984247" watchObservedRunningTime="2025-11-25 12:41:23.754168496 +0000 UTC m=+828.925760877" Nov 25 12:41:23 crc kubenswrapper[4675]: I1125 12:41:23.758389 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vsfws" podStartSLOduration=2.374584852 podStartE2EDuration="5.758370212s" podCreationTimestamp="2025-11-25 12:41:18 +0000 UTC" firstStartedPulling="2025-11-25 12:41:19.463080969 +0000 UTC m=+824.634673310" lastFinishedPulling="2025-11-25 12:41:22.846866329 +0000 UTC m=+828.018458670" observedRunningTime="2025-11-25 12:41:23.738938743 +0000 UTC m=+828.910531084" watchObservedRunningTime="2025-11-25 12:41:23.758370212 +0000 UTC m=+828.929962593" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.111056 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.179048 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rqwf\" (UniqueName: \"kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf\") pod \"d80f92a0-6bd1-4b91-b87e-888f9e929515\" (UID: \"d80f92a0-6bd1-4b91-b87e-888f9e929515\") " Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.184097 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf" (OuterVolumeSpecName: "kube-api-access-4rqwf") pod "d80f92a0-6bd1-4b91-b87e-888f9e929515" (UID: "d80f92a0-6bd1-4b91-b87e-888f9e929515"). InnerVolumeSpecName "kube-api-access-4rqwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.280108 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rqwf\" (UniqueName: \"kubernetes.io/projected/d80f92a0-6bd1-4b91-b87e-888f9e929515-kube-api-access-4rqwf\") on node \"crc\" DevicePath \"\"" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.605740 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-2vqjx" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.733659 4675 generic.go:334] "Generic (PLEG): container finished" podID="d80f92a0-6bd1-4b91-b87e-888f9e929515" containerID="aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b" exitCode=0 Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.733754 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vsfws" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.733759 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vsfws" event={"ID":"d80f92a0-6bd1-4b91-b87e-888f9e929515","Type":"ContainerDied","Data":"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b"} Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.733845 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vsfws" event={"ID":"d80f92a0-6bd1-4b91-b87e-888f9e929515","Type":"ContainerDied","Data":"b743899a0a06ad9ef2dc1979b370f43e3f3eefb08b2475547006a8126d44eda0"} Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.733870 4675 scope.go:117] "RemoveContainer" containerID="aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.754175 4675 scope.go:117] "RemoveContainer" containerID="aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b" Nov 25 12:41:24 crc kubenswrapper[4675]: E1125 12:41:24.754865 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b\": container with ID starting with aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b not found: ID does not exist" containerID="aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.755036 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b"} 
err="failed to get container status \"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b\": rpc error: code = NotFound desc = could not find container \"aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b\": container with ID starting with aca269b3b11e5d0fc20ba9c6601f6324f513d55da10a0161fa23a16e14c1540b not found: ID does not exist" Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.774064 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:24 crc kubenswrapper[4675]: I1125 12:41:24.779745 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-vsfws"] Nov 25 12:41:25 crc kubenswrapper[4675]: I1125 12:41:25.541107 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d80f92a0-6bd1-4b91-b87e-888f9e929515" path="/var/lib/kubelet/pods/d80f92a0-6bd1-4b91-b87e-888f9e929515/volumes" Nov 25 12:41:32 crc kubenswrapper[4675]: I1125 12:41:32.789570 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:32 crc kubenswrapper[4675]: I1125 12:41:32.791603 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:32 crc kubenswrapper[4675]: I1125 12:41:32.831375 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:33 crc kubenswrapper[4675]: I1125 12:41:33.809202 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-9qsg7" Nov 25 12:41:33 crc kubenswrapper[4675]: I1125 12:41:33.994719 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-45wff" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.136681 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-78tvk"] Nov 25 12:41:38 crc kubenswrapper[4675]: E1125 12:41:38.137350 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d80f92a0-6bd1-4b91-b87e-888f9e929515" containerName="registry-server" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.137371 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="d80f92a0-6bd1-4b91-b87e-888f9e929515" containerName="registry-server" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.137564 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="d80f92a0-6bd1-4b91-b87e-888f9e929515" containerName="registry-server" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.138911 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.151652 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-78tvk"] Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.278758 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.278797 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hljfq\" (UniqueName: \"kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.279444 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.380689 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.380794 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.380831 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hljfq\" (UniqueName: \"kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.381407 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.381415 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.403724 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hljfq\" (UniqueName: \"kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq\") pod \"certified-operators-78tvk\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") " pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.459932 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-78tvk" Nov 25 12:41:38 crc kubenswrapper[4675]: I1125 12:41:38.933873 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-78tvk"] Nov 25 12:41:38 crc kubenswrapper[4675]: W1125 12:41:38.941785 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e62ca1c_0bc2_4fef_95ce_7d95d18543ce.slice/crio-853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82 WatchSource:0}: Error finding container 853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82: Status 404 returned error can't find the container with id 853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82 Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.160368 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"] Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.162678 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.165461 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9jw6n" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.178516 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"] Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.193217 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.193446 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qkvd\" (UniqueName: \"kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.193574 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.294636 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qkvd\" (UniqueName: \"kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.294695 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.294744 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.295388 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.295623 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.312233 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qkvd\" (UniqueName: \"kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd\") pod \"418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") " pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.477908 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"
Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.825240 4675 generic.go:334] "Generic (PLEG): container finished" podID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerID="649637c5d8caee75d783dc9284d928ffd7ceb952959ae546d3d8496f3246629d" exitCode=0
Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.825284 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerDied","Data":"649637c5d8caee75d783dc9284d928ffd7ceb952959ae546d3d8496f3246629d"}
Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.825307 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerStarted","Data":"853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82"}
Nov 25 12:41:39 crc kubenswrapper[4675]: I1125 12:41:39.925006 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"]
Nov 25 12:41:40 crc kubenswrapper[4675]: I1125 12:41:40.832136 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerStarted","Data":"58d6171b528d54d6fd4e0fa0df234d2391eb0b9802c8f8c733a4a5be78925fb9"}
Nov 25 12:41:40 crc kubenswrapper[4675]: I1125 12:41:40.834163 4675 generic.go:334] "Generic (PLEG): container finished" podID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerID="f04addd7b85cecda6aff80efdd65abe24fff06937d6853d21ad6cbc66a6e1766" exitCode=0
Nov 25 12:41:40 crc kubenswrapper[4675]: I1125 12:41:40.834212 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" event={"ID":"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82","Type":"ContainerDied","Data":"f04addd7b85cecda6aff80efdd65abe24fff06937d6853d21ad6cbc66a6e1766"}
Nov 25 12:41:40 crc kubenswrapper[4675]: I1125 12:41:40.834238 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" event={"ID":"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82","Type":"ContainerStarted","Data":"3e1b621fac19107d4bda07346ee616b4062a6dd1de181e1d77402fb568fd0fae"}
Nov 25 12:41:41 crc kubenswrapper[4675]: I1125 12:41:41.840263 4675 generic.go:334] "Generic (PLEG): container finished" podID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerID="58d6171b528d54d6fd4e0fa0df234d2391eb0b9802c8f8c733a4a5be78925fb9" exitCode=0
Nov 25 12:41:41 crc kubenswrapper[4675]: I1125 12:41:41.840313 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerDied","Data":"58d6171b528d54d6fd4e0fa0df234d2391eb0b9802c8f8c733a4a5be78925fb9"}
Nov 25 12:41:41 crc kubenswrapper[4675]: I1125 12:41:41.844166 4675 generic.go:334] "Generic (PLEG): container finished" podID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerID="958bab166a8a1575ba5f40c8d097395c1f55100b97cb23f93ab40f28d86b7a93" exitCode=0
Nov 25 12:41:41 crc kubenswrapper[4675]: I1125 12:41:41.844211 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" event={"ID":"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82","Type":"ContainerDied","Data":"958bab166a8a1575ba5f40c8d097395c1f55100b97cb23f93ab40f28d86b7a93"}
Nov 25 12:41:42 crc kubenswrapper[4675]: I1125 12:41:42.855456 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerStarted","Data":"334ac133590cc971093eabc38fc0eff8bc350c02a3c742c2f3338da96a242025"}
Nov 25 12:41:42 crc kubenswrapper[4675]: I1125 12:41:42.857937 4675 generic.go:334] "Generic (PLEG): container finished" podID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerID="50f4be2740b3362e1b700f631f92514a1d8b56101c6f6624ccbb836e8b17bccc" exitCode=0
Nov 25 12:41:42 crc kubenswrapper[4675]: I1125 12:41:42.858009 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" event={"ID":"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82","Type":"ContainerDied","Data":"50f4be2740b3362e1b700f631f92514a1d8b56101c6f6624ccbb836e8b17bccc"}
Nov 25 12:41:42 crc kubenswrapper[4675]: I1125 12:41:42.901681 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-78tvk" podStartSLOduration=2.4736888 podStartE2EDuration="4.901663977s" podCreationTimestamp="2025-11-25 12:41:38 +0000 UTC" firstStartedPulling="2025-11-25 12:41:39.826942564 +0000 UTC m=+844.998534905" lastFinishedPulling="2025-11-25 12:41:42.254917711 +0000 UTC m=+847.426510082" observedRunningTime="2025-11-25 12:41:42.901334146 +0000 UTC m=+848.072926497" watchObservedRunningTime="2025-11-25 12:41:42.901663977 +0000 UTC m=+848.073256318"
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.117219 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.264848 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle\") pod \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") "
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.264939 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util\") pod \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") "
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.265020 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qkvd\" (UniqueName: \"kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd\") pod \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\" (UID: \"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82\") "
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.265725 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle" (OuterVolumeSpecName: "bundle") pod "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" (UID: "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.270894 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd" (OuterVolumeSpecName: "kube-api-access-7qkvd") pod "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" (UID: "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82"). InnerVolumeSpecName "kube-api-access-7qkvd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.279319 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util" (OuterVolumeSpecName: "util") pod "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" (UID: "2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.366141 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qkvd\" (UniqueName: \"kubernetes.io/projected/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-kube-api-access-7qkvd\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.366170 4675 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.366179 4675 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82-util\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.874131 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw" event={"ID":"2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82","Type":"ContainerDied","Data":"3e1b621fac19107d4bda07346ee616b4062a6dd1de181e1d77402fb568fd0fae"}
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.874192 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e1b621fac19107d4bda07346ee616b4062a6dd1de181e1d77402fb568fd0fae"
Nov 25 12:41:44 crc kubenswrapper[4675]: I1125 12:41:44.874206 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw"
Nov 25 12:41:48 crc kubenswrapper[4675]: I1125 12:41:48.460364 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:48 crc kubenswrapper[4675]: I1125 12:41:48.460671 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:48 crc kubenswrapper[4675]: I1125 12:41:48.498344 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:48 crc kubenswrapper[4675]: I1125 12:41:48.936893 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.568761 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"]
Nov 25 12:41:49 crc kubenswrapper[4675]: E1125 12:41:49.569013 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="pull"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.569025 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="pull"
Nov 25 12:41:49 crc kubenswrapper[4675]: E1125 12:41:49.569045 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="util"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.569050 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="util"
Nov 25 12:41:49 crc kubenswrapper[4675]: E1125 12:41:49.569056 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="extract"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.569062 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="extract"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.569157 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82" containerName="extract"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.569705 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.577231 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-fqkwn"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.614643 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"]
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.631491 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhqfn\" (UniqueName: \"kubernetes.io/projected/01418b22-5bf7-4486-bc9c-fe8d6d757b3d-kube-api-access-mhqfn\") pod \"openstack-operator-controller-operator-77b99896c6-rz556\" (UID: \"01418b22-5bf7-4486-bc9c-fe8d6d757b3d\") " pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.732753 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhqfn\" (UniqueName: \"kubernetes.io/projected/01418b22-5bf7-4486-bc9c-fe8d6d757b3d-kube-api-access-mhqfn\") pod \"openstack-operator-controller-operator-77b99896c6-rz556\" (UID: \"01418b22-5bf7-4486-bc9c-fe8d6d757b3d\") " pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.760097 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhqfn\" (UniqueName: \"kubernetes.io/projected/01418b22-5bf7-4486-bc9c-fe8d6d757b3d-kube-api-access-mhqfn\") pod \"openstack-operator-controller-operator-77b99896c6-rz556\" (UID: \"01418b22-5bf7-4486-bc9c-fe8d6d757b3d\") " pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:41:49 crc kubenswrapper[4675]: I1125 12:41:49.887167 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:41:50 crc kubenswrapper[4675]: I1125 12:41:50.466034 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"]
Nov 25 12:41:50 crc kubenswrapper[4675]: I1125 12:41:50.711022 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-78tvk"]
Nov 25 12:41:50 crc kubenswrapper[4675]: I1125 12:41:50.909046 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" event={"ID":"01418b22-5bf7-4486-bc9c-fe8d6d757b3d","Type":"ContainerStarted","Data":"045141bd869adee471ab2c1ccbea86a7ff4d2fc5e9e87b74fa75e2f14318527d"}
Nov 25 12:41:50 crc kubenswrapper[4675]: I1125 12:41:50.909210 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-78tvk" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="registry-server" containerID="cri-o://334ac133590cc971093eabc38fc0eff8bc350c02a3c742c2f3338da96a242025" gracePeriod=2
Nov 25 12:41:51 crc kubenswrapper[4675]: I1125 12:41:51.945638 4675 generic.go:334] "Generic (PLEG): container finished" podID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerID="334ac133590cc971093eabc38fc0eff8bc350c02a3c742c2f3338da96a242025" exitCode=0
Nov 25 12:41:51 crc kubenswrapper[4675]: I1125 12:41:51.945691 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerDied","Data":"334ac133590cc971093eabc38fc0eff8bc350c02a3c742c2f3338da96a242025"}
Nov 25 12:41:51 crc kubenswrapper[4675]: I1125 12:41:51.947369 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-78tvk" event={"ID":"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce","Type":"ContainerDied","Data":"853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82"}
Nov 25 12:41:51 crc kubenswrapper[4675]: I1125 12:41:51.947408 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="853d51024868964616b5fe8b4fe9beea3b876024e3b3ab68e06aa60a6b341d82"
Nov 25 12:41:51 crc kubenswrapper[4675]: I1125 12:41:51.999636 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.160761 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content\") pod \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") "
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.160845 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities\") pod \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") "
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.160928 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hljfq\" (UniqueName: \"kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq\") pod \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\" (UID: \"9e62ca1c-0bc2-4fef-95ce-7d95d18543ce\") "
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.161957 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities" (OuterVolumeSpecName: "utilities") pod "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" (UID: "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.166588 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq" (OuterVolumeSpecName: "kube-api-access-hljfq") pod "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" (UID: "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce"). InnerVolumeSpecName "kube-api-access-hljfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.214324 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" (UID: "9e62ca1c-0bc2-4fef-95ce-7d95d18543ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.262943 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hljfq\" (UniqueName: \"kubernetes.io/projected/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-kube-api-access-hljfq\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.262986 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.262996 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.952129 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-78tvk"
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.983531 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-78tvk"]
Nov 25 12:41:52 crc kubenswrapper[4675]: I1125 12:41:52.988861 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-78tvk"]
Nov 25 12:41:53 crc kubenswrapper[4675]: I1125 12:41:53.544766 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" path="/var/lib/kubelet/pods/9e62ca1c-0bc2-4fef-95ce-7d95d18543ce/volumes"
Nov 25 12:41:56 crc kubenswrapper[4675]: I1125 12:41:56.979645 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" event={"ID":"01418b22-5bf7-4486-bc9c-fe8d6d757b3d","Type":"ContainerStarted","Data":"771ad606b5fa16113322ff050190fd8022b2d7dea1753e17e15cbc216aeab07d"}
Nov 25 12:42:04 crc kubenswrapper[4675]: I1125 12:42:04.024615 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" event={"ID":"01418b22-5bf7-4486-bc9c-fe8d6d757b3d","Type":"ContainerStarted","Data":"2e6a325f40ae50e9eaa35b79bd7e70d7959eb55bda151298f1708b7962e59436"}
Nov 25 12:42:04 crc kubenswrapper[4675]: I1125 12:42:04.026076 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:42:04 crc kubenswrapper[4675]: I1125 12:42:04.029474 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556"
Nov 25 12:42:04 crc kubenswrapper[4675]: I1125 12:42:04.058837 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" podStartSLOduration=2.108674263 podStartE2EDuration="15.058790276s" podCreationTimestamp="2025-11-25 12:41:49 +0000 UTC" firstStartedPulling="2025-11-25 12:41:50.482879122 +0000 UTC m=+855.654471463" lastFinishedPulling="2025-11-25 12:42:03.432995135 +0000 UTC m=+868.604587476" observedRunningTime="2025-11-25 12:42:04.057868826 +0000 UTC m=+869.229461167" watchObservedRunningTime="2025-11-25 12:42:04.058790276 +0000 UTC m=+869.230382627"
Nov 25 12:42:13 crc kubenswrapper[4675]: I1125 12:42:13.662418 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:42:13 crc kubenswrapper[4675]: I1125 12:42:13.663926 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.222298 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"]
Nov 25 12:42:21 crc kubenswrapper[4675]: E1125 12:42:21.222983 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="extract-utilities"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.222997 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="extract-utilities"
Nov 25 12:42:21 crc kubenswrapper[4675]: E1125 12:42:21.223012 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="registry-server"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.223021 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="registry-server"
Nov 25 12:42:21 crc kubenswrapper[4675]: E1125 12:42:21.223042 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="extract-content"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.223052 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="extract-content"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.223188 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e62ca1c-0bc2-4fef-95ce-7d95d18543ce" containerName="registry-server"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.223943 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.227010 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-sh2db"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.234910 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.240607 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.241690 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.246145 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-fl8vx"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.281086 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.305314 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.306429 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.311382 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-sqjxf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.316632 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.317766 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.323909 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-kx8h9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.340602 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.347668 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knhzt\" (UniqueName: \"kubernetes.io/projected/966aefc3-6c87-4e64-b9ae-0c175f4d18a3-kube-api-access-knhzt\") pod \"cinder-operator-controller-manager-748967c98-4hkh4\" (UID: \"966aefc3-6c87-4e64-b9ae-0c175f4d18a3\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.347773 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjbcv\" (UniqueName: \"kubernetes.io/projected/ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3-kube-api-access-gjbcv\") pod \"barbican-operator-controller-manager-5689899996-24rxr\" (UID: \"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3\") " pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.350922 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.368244 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.369123 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.372389 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gdw6g"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.380674 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.422569 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.423673 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.426420 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.427400 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.428265 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-rqnn6"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.441432 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-v54zb"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.441548 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.449704 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.450805 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.451056 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfzvp\" (UniqueName: \"kubernetes.io/projected/223d4b40-6f09-41f5-816d-7e82b45b4b90-kube-api-access-wfzvp\") pod \"heat-operator-controller-manager-698d6fd7d6-nkq7r\" (UID: \"223d4b40-6f09-41f5-816d-7e82b45b4b90\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.451158 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knhzt\" (UniqueName: \"kubernetes.io/projected/966aefc3-6c87-4e64-b9ae-0c175f4d18a3-kube-api-access-knhzt\") pod \"cinder-operator-controller-manager-748967c98-4hkh4\" (UID: \"966aefc3-6c87-4e64-b9ae-0c175f4d18a3\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.451237 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n5ts\" (UniqueName: \"kubernetes.io/projected/8d89af10-26a8-4d8b-aedf-8e450df0f28a-kube-api-access-5n5ts\") pod \"designate-operator-controller-manager-6788cc6d75-4cprh\" (UID: \"8d89af10-26a8-4d8b-aedf-8e450df0f28a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.451457 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjbcv\" (UniqueName: \"kubernetes.io/projected/ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3-kube-api-access-gjbcv\") pod \"barbican-operator-controller-manager-5689899996-24rxr\" (UID: \"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3\") " pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.451556 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6xnc\" (UniqueName: \"kubernetes.io/projected/986b1a58-05d0-4beb-9199-a7564c809455-kube-api-access-h6xnc\") pod \"glance-operator-controller-manager-6bd966bbd4-hzjqx\" (UID: \"986b1a58-05d0-4beb-9199-a7564c809455\") " pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 12:42:21 crc kubenswrapper[4675]: W1125 12:42:21.468008 4675 reflector.go:561] object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-qcbwl": failed to list *v1.Secret: secrets "ironic-operator-controller-manager-dockercfg-qcbwl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object
Nov 25 12:42:21 crc kubenswrapper[4675]: E1125 12:42:21.468066 4675 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"ironic-operator-controller-manager-dockercfg-qcbwl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ironic-operator-controller-manager-dockercfg-qcbwl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.471275 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.487901 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knhzt\" (UniqueName: \"kubernetes.io/projected/966aefc3-6c87-4e64-b9ae-0c175f4d18a3-kube-api-access-knhzt\") pod \"cinder-operator-controller-manager-748967c98-4hkh4\" (UID: \"966aefc3-6c87-4e64-b9ae-0c175f4d18a3\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.491946 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.497083 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.502393 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjbcv\" (UniqueName: \"kubernetes.io/projected/ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3-kube-api-access-gjbcv\") pod \"barbican-operator-controller-manager-5689899996-24rxr\" (UID: \"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3\") " pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.518961 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.520133 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.532900 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-c2rs6"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.542039 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.557459 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558605 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l882d\" (UniqueName: \"kubernetes.io/projected/a271eb36-50fc-40c6-8885-f97f281c1150-kube-api-access-l882d\") pod \"ironic-operator-controller-manager-54485f899-6pvms\" (UID: \"a271eb36-50fc-40c6-8885-f97f281c1150\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558652 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6xnc\" (UniqueName: \"kubernetes.io/projected/986b1a58-05d0-4beb-9199-a7564c809455-kube-api-access-h6xnc\") pod \"glance-operator-controller-manager-6bd966bbd4-hzjqx\" (UID: \"986b1a58-05d0-4beb-9199-a7564c809455\") " pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558674 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18941428-e287-4374-93e0-3209cdbbf7d7-cert\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558701 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kndlx\" (UniqueName: \"kubernetes.io/projected/18941428-e287-4374-93e0-3209cdbbf7d7-kube-api-access-kndlx\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558725 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfzvp\" (UniqueName: \"kubernetes.io/projected/223d4b40-6f09-41f5-816d-7e82b45b4b90-kube-api-access-wfzvp\") pod \"heat-operator-controller-manager-698d6fd7d6-nkq7r\" (UID: \"223d4b40-6f09-41f5-816d-7e82b45b4b90\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558750 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxdcx\" (UniqueName: \"kubernetes.io/projected/51b6ef4f-14c9-4c56-b374-3183ccd5cacb-kube-api-access-dxdcx\") pod \"horizon-operator-controller-manager-7d5d9fd47f-n6gqt\" (UID: \"51b6ef4f-14c9-4c56-b374-3183ccd5cacb\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.558766 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n5ts\" (UniqueName: \"kubernetes.io/projected/8d89af10-26a8-4d8b-aedf-8e450df0f28a-kube-api-access-5n5ts\") pod \"designate-operator-controller-manager-6788cc6d75-4cprh\" (UID: \"8d89af10-26a8-4d8b-aedf-8e450df0f28a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.583769 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.585635 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.613221 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-pm2zh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.622843 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6xnc\" (UniqueName: \"kubernetes.io/projected/986b1a58-05d0-4beb-9199-a7564c809455-kube-api-access-h6xnc\") pod \"glance-operator-controller-manager-6bd966bbd4-hzjqx\" (UID: \"986b1a58-05d0-4beb-9199-a7564c809455\") " pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.631678 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.646267 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfzvp\" (UniqueName: \"kubernetes.io/projected/223d4b40-6f09-41f5-816d-7e82b45b4b90-kube-api-access-wfzvp\") pod \"heat-operator-controller-manager-698d6fd7d6-nkq7r\" (UID: \"223d4b40-6f09-41f5-816d-7e82b45b4b90\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.647743 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n5ts\" (UniqueName: \"kubernetes.io/projected/8d89af10-26a8-4d8b-aedf-8e450df0f28a-kube-api-access-5n5ts\") pod \"designate-operator-controller-manager-6788cc6d75-4cprh\" (UID: \"8d89af10-26a8-4d8b-aedf-8e450df0f28a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.673342 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.681097 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l882d\" (UniqueName: \"kubernetes.io/projected/a271eb36-50fc-40c6-8885-f97f281c1150-kube-api-access-l882d\") pod \"ironic-operator-controller-manager-54485f899-6pvms\" (UID: \"a271eb36-50fc-40c6-8885-f97f281c1150\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.681160 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18941428-e287-4374-93e0-3209cdbbf7d7-cert\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.685350 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kndlx\" (UniqueName: \"kubernetes.io/projected/18941428-e287-4374-93e0-3209cdbbf7d7-kube-api-access-kndlx\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.685526 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92vr6\" (UniqueName: \"kubernetes.io/projected/33456bb6-8430-432c-ac26-1c43307141e3-kube-api-access-92vr6\") pod \"mariadb-operator-controller-manager-64d7c556cd-tq6jf\" (UID: \"33456bb6-8430-432c-ac26-1c43307141e3\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.685668 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxdcx\" (UniqueName: \"kubernetes.io/projected/51b6ef4f-14c9-4c56-b374-3183ccd5cacb-kube-api-access-dxdcx\") pod \"horizon-operator-controller-manager-7d5d9fd47f-n6gqt\" (UID: \"51b6ef4f-14c9-4c56-b374-3183ccd5cacb\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.685804 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j2k7\" (UniqueName: \"kubernetes.io/projected/fbd303b9-17db-401e-acbf-1ef8219e36df-kube-api-access-7j2k7\") pod \"keystone-operator-controller-manager-7d6f5d799-7p97w\" (UID: \"fbd303b9-17db-401e-acbf-1ef8219e36df\") " pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.700319 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.733457 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18941428-e287-4374-93e0-3209cdbbf7d7-cert\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.762869 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.769548 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxdcx\" (UniqueName: \"kubernetes.io/projected/51b6ef4f-14c9-4c56-b374-3183ccd5cacb-kube-api-access-dxdcx\") pod \"horizon-operator-controller-manager-7d5d9fd47f-n6gqt\" (UID: \"51b6ef4f-14c9-4c56-b374-3183ccd5cacb\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.769611 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kndlx\" (UniqueName: \"kubernetes.io/projected/18941428-e287-4374-93e0-3209cdbbf7d7-kube-api-access-kndlx\") pod \"infra-operator-controller-manager-577c5f6d94-svnp9\" (UID: \"18941428-e287-4374-93e0-3209cdbbf7d7\") " pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.773967 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l882d\" (UniqueName: \"kubernetes.io/projected/a271eb36-50fc-40c6-8885-f97f281c1150-kube-api-access-l882d\") pod \"ironic-operator-controller-manager-54485f899-6pvms\" (UID: \"a271eb36-50fc-40c6-8885-f97f281c1150\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.777078 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.781329 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.786901 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.798016 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j2k7\" (UniqueName: \"kubernetes.io/projected/fbd303b9-17db-401e-acbf-1ef8219e36df-kube-api-access-7j2k7\") pod \"keystone-operator-controller-manager-7d6f5d799-7p97w\" (UID: \"fbd303b9-17db-401e-acbf-1ef8219e36df\") " pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.798112 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92vr6\" (UniqueName: \"kubernetes.io/projected/33456bb6-8430-432c-ac26-1c43307141e3-kube-api-access-92vr6\") pod \"mariadb-operator-controller-manager-64d7c556cd-tq6jf\" (UID: \"33456bb6-8430-432c-ac26-1c43307141e3\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.807102 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-thhbf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.811919 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.813414 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.821849 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-kk54k"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.833260 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.834351 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.835078 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.835374 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.836435 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.840942 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.841253 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-hv9m7"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.851120 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5dqnb"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.865097 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.896321 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92vr6\" (UniqueName: \"kubernetes.io/projected/33456bb6-8430-432c-ac26-1c43307141e3-kube-api-access-92vr6\") pod \"mariadb-operator-controller-manager-64d7c556cd-tq6jf\" (UID: \"33456bb6-8430-432c-ac26-1c43307141e3\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.899617 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.902057 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsdh7\" (UniqueName: \"kubernetes.io/projected/d4608140-77a4-4067-b58e-a95ae2249fea-kube-api-access-wsdh7\") pod \"nova-operator-controller-manager-79d658b66d-dls9t\" (UID: \"d4608140-77a4-4067-b58e-a95ae2249fea\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.902179 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz7s6\" (UniqueName: \"kubernetes.io/projected/e6ff98cd-4075-49dd-b40b-d1923298513e-kube-api-access-tz7s6\") pod \"manila-operator-controller-manager-646fd589f9-jdxms\" (UID: \"e6ff98cd-4075-49dd-b40b-d1923298513e\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.902271 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2cqn\" (UniqueName: \"kubernetes.io/projected/8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1-kube-api-access-z2cqn\") pod \"octavia-operator-controller-manager-7979c68bc7-m6zl4\" (UID: \"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1\") " pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.902297 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj8fj\" (UniqueName: \"kubernetes.io/projected/a5a68379-3de8-4970-8ca1-ccf52f2d7ad8-kube-api-access-jj8fj\") pod \"neutron-operator-controller-manager-6b6c55ffd5-84vzh\" (UID: \"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8\") " pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.916394 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j2k7\" (UniqueName: \"kubernetes.io/projected/fbd303b9-17db-401e-acbf-1ef8219e36df-kube-api-access-7j2k7\") pod \"keystone-operator-controller-manager-7d6f5d799-7p97w\" (UID: \"fbd303b9-17db-401e-acbf-1ef8219e36df\") " pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.940381 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.940403 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.987107 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"]
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.988122 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:21 crc kubenswrapper[4675]: I1125 12:42:21.992261 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.007203 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz7s6\" (UniqueName: \"kubernetes.io/projected/e6ff98cd-4075-49dd-b40b-d1923298513e-kube-api-access-tz7s6\") pod \"manila-operator-controller-manager-646fd589f9-jdxms\" (UID: \"e6ff98cd-4075-49dd-b40b-d1923298513e\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.007260 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2cqn\" (UniqueName: \"kubernetes.io/projected/8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1-kube-api-access-z2cqn\") pod \"octavia-operator-controller-manager-7979c68bc7-m6zl4\" (UID: \"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1\") " pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.007282 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj8fj\" (UniqueName: \"kubernetes.io/projected/a5a68379-3de8-4970-8ca1-ccf52f2d7ad8-kube-api-access-jj8fj\") pod \"neutron-operator-controller-manager-6b6c55ffd5-84vzh\" (UID: \"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8\") " pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.007318 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsdh7\" (UniqueName: \"kubernetes.io/projected/d4608140-77a4-4067-b58e-a95ae2249fea-kube-api-access-wsdh7\") pod \"nova-operator-controller-manager-79d658b66d-dls9t\" (UID: \"d4608140-77a4-4067-b58e-a95ae2249fea\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.030845 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-ff2p8"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.031134 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.048431 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.049457 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.065337 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-h7b69"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.069111 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.073341 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsdh7\" (UniqueName: \"kubernetes.io/projected/d4608140-77a4-4067-b58e-a95ae2249fea-kube-api-access-wsdh7\") pod \"nova-operator-controller-manager-79d658b66d-dls9t\" (UID: \"d4608140-77a4-4067-b58e-a95ae2249fea\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.079170 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.099028 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz7s6\" (UniqueName: \"kubernetes.io/projected/e6ff98cd-4075-49dd-b40b-d1923298513e-kube-api-access-tz7s6\") pod \"manila-operator-controller-manager-646fd589f9-jdxms\" (UID: \"e6ff98cd-4075-49dd-b40b-d1923298513e\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.104842 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj8fj\" (UniqueName: \"kubernetes.io/projected/a5a68379-3de8-4970-8ca1-ccf52f2d7ad8-kube-api-access-jj8fj\") pod \"neutron-operator-controller-manager-6b6c55ffd5-84vzh\" (UID: \"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8\") " pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.113491 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2cqn\" (UniqueName: \"kubernetes.io/projected/8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1-kube-api-access-z2cqn\") pod \"octavia-operator-controller-manager-7979c68bc7-m6zl4\" (UID: \"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1\") " pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.113564 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.114497 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27wsx\" (UniqueName: \"kubernetes.io/projected/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-kube-api-access-27wsx\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.114557 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.114581 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdgqq\" (UniqueName: \"kubernetes.io/projected/9495eb50-984d-4069-bd95-719e714b1178-kube-api-access-wdgqq\") pod \"ovn-operator-controller-manager-5b67cfc8fb-4pmkv\" (UID: \"9495eb50-984d-4069-bd95-719e714b1178\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.120296 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.121347 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.131082 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-nt2qj"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.146345 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.205070 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.206380 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.227142 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8gnq\" (UniqueName: \"kubernetes.io/projected/88da95fd-fdf9-402d-90d8-e742f92cffbb-kube-api-access-g8gnq\") pod \"telemetry-operator-controller-manager-58487d9bf4-9rf4d\" (UID: \"88da95fd-fdf9-402d-90d8-e742f92cffbb\") " pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.227202 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27wsx\" (UniqueName: \"kubernetes.io/projected/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-kube-api-access-27wsx\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.227238 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.227259 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdgqq\" (UniqueName: \"kubernetes.io/projected/9495eb50-984d-4069-bd95-719e714b1178-kube-api-access-wdgqq\") pod \"ovn-operator-controller-manager-5b67cfc8fb-4pmkv\" (UID: \"9495eb50-984d-4069-bd95-719e714b1178\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"
Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.228029 4675 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.228079 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert podName:bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48 nodeName:}" failed. No retries permitted until 2025-11-25 12:42:22.728061353 +0000 UTC m=+887.899653694 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-xxcn9" (UID: "bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.228417 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pngsz"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.232159 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.238878 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.240104 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.241120 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.251580 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-7qmsf"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.277235 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.291628 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27wsx\" (UniqueName: \"kubernetes.io/projected/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-kube-api-access-27wsx\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.300111 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.309105 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.309360 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdgqq\" (UniqueName: \"kubernetes.io/projected/9495eb50-984d-4069-bd95-719e714b1178-kube-api-access-wdgqq\") pod \"ovn-operator-controller-manager-5b67cfc8fb-4pmkv\" (UID: \"9495eb50-984d-4069-bd95-719e714b1178\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.309778 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.329058 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8gnq\" (UniqueName: \"kubernetes.io/projected/88da95fd-fdf9-402d-90d8-e742f92cffbb-kube-api-access-g8gnq\") pod \"telemetry-operator-controller-manager-58487d9bf4-9rf4d\" (UID: \"88da95fd-fdf9-402d-90d8-e742f92cffbb\") " pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.329144 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcvr7\" (UniqueName: \"kubernetes.io/projected/6fa6f393-fc29-4035-81da-a9965421c77f-kube-api-access-bcvr7\") pod \"swift-operator-controller-manager-cc9f5bc5c-lr9bx\" (UID: \"6fa6f393-fc29-4035-81da-a9965421c77f\") " pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.329166 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht9cw\" (UniqueName: \"kubernetes.io/projected/e8f46595-6a0c-4b55-9839-3360395606f7-kube-api-access-ht9cw\") pod \"placement-operator-controller-manager-867d87977b-2z8vf\" (UID: \"e8f46595-6a0c-4b55-9839-3360395606f7\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.330886 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-qxk2b"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.337386 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.385488 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8gnq\" (UniqueName: \"kubernetes.io/projected/88da95fd-fdf9-402d-90d8-e742f92cffbb-kube-api-access-g8gnq\") pod \"telemetry-operator-controller-manager-58487d9bf4-9rf4d\" (UID: \"88da95fd-fdf9-402d-90d8-e742f92cffbb\") " pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.389403 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.428140 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74"]
Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.429518 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.434341 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.434705 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjmkj\" (UniqueName: \"kubernetes.io/projected/2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12-kube-api-access-gjmkj\") pod \"test-operator-controller-manager-77db6bf9c-rkgfz\" (UID: \"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.434768 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcvr7\" (UniqueName: \"kubernetes.io/projected/6fa6f393-fc29-4035-81da-a9965421c77f-kube-api-access-bcvr7\") pod \"swift-operator-controller-manager-cc9f5bc5c-lr9bx\" (UID: \"6fa6f393-fc29-4035-81da-a9965421c77f\") " pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.434794 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht9cw\" (UniqueName: \"kubernetes.io/projected/e8f46595-6a0c-4b55-9839-3360395606f7-kube-api-access-ht9cw\") pod \"placement-operator-controller-manager-867d87977b-2z8vf\" (UID: \"e8f46595-6a0c-4b55-9839-3360395606f7\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.441649 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-hvkvf" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.461584 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.464397 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.509597 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.524635 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcvr7\" (UniqueName: \"kubernetes.io/projected/6fa6f393-fc29-4035-81da-a9965421c77f-kube-api-access-bcvr7\") pod \"swift-operator-controller-manager-cc9f5bc5c-lr9bx\" (UID: \"6fa6f393-fc29-4035-81da-a9965421c77f\") " pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.525100 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht9cw\" (UniqueName: \"kubernetes.io/projected/e8f46595-6a0c-4b55-9839-3360395606f7-kube-api-access-ht9cw\") pod \"placement-operator-controller-manager-867d87977b-2z8vf\" (UID: \"e8f46595-6a0c-4b55-9839-3360395606f7\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.563419 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2j8g\" (UniqueName: \"kubernetes.io/projected/a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb-kube-api-access-n2j8g\") pod \"watcher-operator-controller-manager-6b56b8849f-r6m74\" (UID: \"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.564003 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjmkj\" (UniqueName: \"kubernetes.io/projected/2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12-kube-api-access-gjmkj\") pod \"test-operator-controller-manager-77db6bf9c-rkgfz\" (UID: \"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.590902 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.614648 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.666315 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2j8g\" (UniqueName: \"kubernetes.io/projected/a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb-kube-api-access-n2j8g\") pod \"watcher-operator-controller-manager-6b56b8849f-r6m74\" (UID: \"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.732442 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2j8g\" (UniqueName: \"kubernetes.io/projected/a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb-kube-api-access-n2j8g\") pod \"watcher-operator-controller-manager-6b56b8849f-r6m74\" (UID: \"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.781157 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.782244 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.783478 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.783634 4675 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.783691 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert podName:bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48 nodeName:}" failed. No retries permitted until 2025-11-25 12:42:23.78367213 +0000 UTC m=+888.955264471 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-xxcn9" (UID: "bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.784661 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjmkj\" (UniqueName: \"kubernetes.io/projected/2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12-kube-api-access-gjmkj\") pod \"test-operator-controller-manager-77db6bf9c-rkgfz\" (UID: \"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.799390 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.813254 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.813674 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dkh98" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.865794 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.885945 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.886308 4675 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" secret="" err="failed to sync secret cache: timed out waiting for the condition" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.886348 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.886551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvjj8\" (UniqueName: \"kubernetes.io/projected/21978291-afd8-477d-9e86-80a465441902-kube-api-access-qvjj8\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.886609 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.895198 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.898261 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-qcbwl" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.904956 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.909732 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-cfxt2" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.914165 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.939423 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh"] Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.969320 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.987569 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvjj8\" (UniqueName: \"kubernetes.io/projected/21978291-afd8-477d-9e86-80a465441902-kube-api-access-qvjj8\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:22 crc kubenswrapper[4675]: I1125 12:42:22.987629 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.987771 4675 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 12:42:22 crc kubenswrapper[4675]: E1125 12:42:22.987855 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert podName:21978291-afd8-477d-9e86-80a465441902 nodeName:}" failed. No retries permitted until 2025-11-25 12:42:23.487808401 +0000 UTC m=+888.659400742 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert") pod "openstack-operator-controller-manager-75cf7cf5cb-gbbjk" (UID: "21978291-afd8-477d-9e86-80a465441902") : secret "webhook-server-cert" not found Nov 25 12:42:22 crc kubenswrapper[4675]: W1125 12:42:22.992224 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod966aefc3_6c87_4e64_b9ae_0c175f4d18a3.slice/crio-8cd223d5e808731a28e5220e761646fd691d007be4a4376e68ebb7585d703a61 WatchSource:0}: Error finding container 8cd223d5e808731a28e5220e761646fd691d007be4a4376e68ebb7585d703a61: Status 404 returned error can't find the container with id 8cd223d5e808731a28e5220e761646fd691d007be4a4376e68ebb7585d703a61 Nov 25 12:42:23 crc kubenswrapper[4675]: W1125 12:42:22.999756 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad1d87d3_79d6_43d9_adc5_0ae5b52fa6e3.slice/crio-c576246f7a50c01fdad02cd76429b583c59bd11ed0a75374604ad7a5241d7390 WatchSource:0}: Error finding container c576246f7a50c01fdad02cd76429b583c59bd11ed0a75374604ad7a5241d7390: Status 404 returned error can't find the container with id c576246f7a50c01fdad02cd76429b583c59bd11ed0a75374604ad7a5241d7390 Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.029209 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvjj8\" (UniqueName: \"kubernetes.io/projected/21978291-afd8-477d-9e86-80a465441902-kube-api-access-qvjj8\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.093721 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z4fj\" (UniqueName: \"kubernetes.io/projected/64b432ef-6de9-4d8d-84ce-78f2097bf31e-kube-api-access-8z4fj\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-msgjh\" (UID: \"64b432ef-6de9-4d8d-84ce-78f2097bf31e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.171186 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerStarted","Data":"8cd223d5e808731a28e5220e761646fd691d007be4a4376e68ebb7585d703a61"} Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.177413 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerStarted","Data":"c576246f7a50c01fdad02cd76429b583c59bd11ed0a75374604ad7a5241d7390"} Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.194741 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z4fj\" (UniqueName: \"kubernetes.io/projected/64b432ef-6de9-4d8d-84ce-78f2097bf31e-kube-api-access-8z4fj\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-msgjh\" (UID: \"64b432ef-6de9-4d8d-84ce-78f2097bf31e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.215041 4675 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.220554 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z4fj\" (UniqueName: \"kubernetes.io/projected/64b432ef-6de9-4d8d-84ce-78f2097bf31e-kube-api-access-8z4fj\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-msgjh\" (UID: \"64b432ef-6de9-4d8d-84ce-78f2097bf31e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.311609 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.471392 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.504065 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.507090 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/21978291-afd8-477d-9e86-80a465441902-cert\") pod \"openstack-operator-controller-manager-75cf7cf5cb-gbbjk\" (UID: \"21978291-afd8-477d-9e86-80a465441902\") " pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.740066 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.809001 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.827104 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-xxcn9\" (UID: \"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.861174 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.887890 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.901949 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.920903 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.930187 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.941679 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.953360 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh"] Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.970946 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9"] Nov 25 12:42:23 crc kubenswrapper[4675]: W1125 12:42:23.976327 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5a68379_3de8_4970_8ca1_ccf52f2d7ad8.slice/crio-afeebbd099199c1e50a2eec59e83a141afebc64ce189529b330c628fce153314 WatchSource:0}: Error finding container afeebbd099199c1e50a2eec59e83a141afebc64ce189529b330c628fce153314: Status 404 returned error can't find the container with id afeebbd099199c1e50a2eec59e83a141afebc64ce189529b330c628fce153314 Nov 25 12:42:23 crc kubenswrapper[4675]: I1125 12:42:23.991454 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.008334 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"] Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.009599 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8abddd77_b0e2_4fd7_bdf9_76f4f9f76fd1.slice/crio-9d072182937e33d966342d55199a1a955f3396993af1e0fff79d789e3077e4a6 WatchSource:0}: Error finding container 9d072182937e33d966342d55199a1a955f3396993af1e0fff79d789e3077e4a6: Status 404 returned error can't find the container with id 9d072182937e33d966342d55199a1a955f3396993af1e0fff79d789e3077e4a6 Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.010464 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.028651 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.038007 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.040778 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"] Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.051711 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbd303b9_17db_401e_acbf_1ef8219e36df.slice/crio-bc8b151d80f5383d94269c246407e5bfb4f6f67d1b53f38bffb2a3c8c12af6e9 WatchSource:0}: Error finding container bc8b151d80f5383d94269c246407e5bfb4f6f67d1b53f38bffb2a3c8c12af6e9: Status 404 returned error can't find the container with id bc8b151d80f5383d94269c246407e5bfb4f6f67d1b53f38bffb2a3c8c12af6e9 Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.061507 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9495eb50_984d_4069_bd95_719e714b1178.slice/crio-fac45835a07a3be5415c00ba32e51446be46b9c11c926841d0551aeb7d77c9c5 WatchSource:0}: Error finding container fac45835a07a3be5415c00ba32e51446be46b9c11c926841d0551aeb7d77c9c5: Status 404 returned error can't find the container with id fac45835a07a3be5415c00ba32e51446be46b9c11c926841d0551aeb7d77c9c5 Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.063590 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88da95fd_fdf9_402d_90d8_e742f92cffbb.slice/crio-685cd186d58d78c31cbcabe2b0f2015ca5b745469dfb8ff0c8921f3ecdd39304 WatchSource:0}: Error finding container 685cd186d58d78c31cbcabe2b0f2015ca5b745469dfb8ff0c8921f3ecdd39304: Status 404 returned error can't find the container with id 685cd186d58d78c31cbcabe2b0f2015ca5b745469dfb8ff0c8921f3ecdd39304 Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.064849 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8f46595_6a0c_4b55_9839_3360395606f7.slice/crio-7dce30a92d18bce2caa7870bc20ef8898a1511d9357052a9f32b6bd9feba773d WatchSource:0}: Error finding container 7dce30a92d18bce2caa7870bc20ef8898a1511d9357052a9f32b6bd9feba773d: Status 404 returned error can't find the container with id 7dce30a92d18bce2caa7870bc20ef8898a1511d9357052a9f32b6bd9feba773d Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.067343 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod986b1a58_05d0_4beb_9199_a7564c809455.slice/crio-cf7e1b70da1d2486f683e6a49dc5de70111994f223a57573a62491b01c6aa166 WatchSource:0}: Error finding container cf7e1b70da1d2486f683e6a49dc5de70111994f223a57573a62491b01c6aa166: Status 404 returned error can't find the container with id cf7e1b70da1d2486f683e6a49dc5de70111994f223a57573a62491b01c6aa166 Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.078759 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18941428_e287_4374_93e0_3209cdbbf7d7.slice/crio-4157fecbb3cf7dd5932e1cd12e97c15a6f75fb7c6da7171d99a5dfaeb0f85329 WatchSource:0}: Error finding container 4157fecbb3cf7dd5932e1cd12e97c15a6f75fb7c6da7171d99a5dfaeb0f85329: Status 404 returned error can't find the container with id 4157fecbb3cf7dd5932e1cd12e97c15a6f75fb7c6da7171d99a5dfaeb0f85329 Nov 25 12:42:24 crc 
kubenswrapper[4675]: W1125 12:42:24.096745 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fa6f393_fc29_4035_81da_a9965421c77f.slice/crio-2fa2c48254d65709e60598b4046506f8123b8b14f927f6940264e1c9d2f1a98b WatchSource:0}: Error finding container 2fa2c48254d65709e60598b4046506f8123b8b14f927f6940264e1c9d2f1a98b: Status 404 returned error can't find the container with id 2fa2c48254d65709e60598b4046506f8123b8b14f927f6940264e1c9d2f1a98b Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.099379 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:6f630b256a17a0d40ec49bbf3bfbc65118e712cafea97fb0eee03dbc037d6bf8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kndlx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.102108 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wdgqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.103519 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:bc58f62c7171e9c9216fdeafbd170917b638e6c3f842031ee254f1389c57a09e,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bcvr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.196579 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerStarted","Data":"f551a502a884f39f8785273c720bc368b8ef9ccb694135ad17d9dcf6e9a95ff5"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.197444 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerStarted","Data":"e99008efcb536bb070da4efd783d2cbc8a6c7b9ec3ad9d8846e34b29b0202935"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.198257 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerStarted","Data":"2f57f4bd727cc52f1e54299c7688e3d868e55f5fec1adecfd066320296b65992"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.199228 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"cf7e1b70da1d2486f683e6a49dc5de70111994f223a57573a62491b01c6aa166"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.204508 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerStarted","Data":"afeebbd099199c1e50a2eec59e83a141afebc64ce189529b330c628fce153314"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.209827 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerStarted","Data":"bc8b151d80f5383d94269c246407e5bfb4f6f67d1b53f38bffb2a3c8c12af6e9"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.218198 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerStarted","Data":"685cd186d58d78c31cbcabe2b0f2015ca5b745469dfb8ff0c8921f3ecdd39304"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.219686 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerStarted","Data":"fac45835a07a3be5415c00ba32e51446be46b9c11c926841d0551aeb7d77c9c5"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.224652 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerStarted","Data":"2fa2c48254d65709e60598b4046506f8123b8b14f927f6940264e1c9d2f1a98b"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.229440 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerStarted","Data":"ae2ee39edf1338f02f6dad601829cfebfae778c104675c9290f2b52afb490f54"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.244500 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerStarted","Data":"7dce30a92d18bce2caa7870bc20ef8898a1511d9357052a9f32b6bd9feba773d"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.247177 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerStarted","Data":"3765c1afb5cf54dc0903277e50af2773b88c45d56bbe3adf5e09e45403a436fe"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.251048 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerStarted","Data":"d2024007a2a3b028873e99e69da4ffab2e51a9443c612b4d576b1b2d212abf93"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.252551 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.254173 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerStarted","Data":"4157fecbb3cf7dd5932e1cd12e97c15a6f75fb7c6da7171d99a5dfaeb0f85329"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.255949 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerStarted","Data":"9d072182937e33d966342d55199a1a955f3396993af1e0fff79d789e3077e4a6"} Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.301112 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.341329 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"] Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.358304 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74"] Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.364180 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8z4fj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_openstack-operators(64b432ef-6de9-4d8d-84ce-78f2097bf31e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.365415 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" Nov 25 12:42:24 crc kubenswrapper[4675]: E1125 12:42:24.369193 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l882d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.392306 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk"] Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.418385 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21978291_afd8_477d_9e86_80a465441902.slice/crio-d6dca4306f8713f2d27bcd77bdbc65a71df5610e4b46b3b9b9421dfecfdc4c90 WatchSource:0}: Error finding container d6dca4306f8713f2d27bcd77bdbc65a71df5610e4b46b3b9b9421dfecfdc4c90: Status 404 returned error can't find the container with id d6dca4306f8713f2d27bcd77bdbc65a71df5610e4b46b3b9b9421dfecfdc4c90 Nov 25 12:42:24 crc kubenswrapper[4675]: I1125 12:42:24.691807 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"] Nov 25 12:42:24 crc kubenswrapper[4675]: W1125 12:42:24.698889 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcc7bd3d_10ec_47a1_81b2_aa08d2d46c48.slice/crio-37749a388af3fe75808e32b53c802121d0418398264c347db19cd7e447f8eb79 WatchSource:0}: Error finding container 37749a388af3fe75808e32b53c802121d0418398264c347db19cd7e447f8eb79: Status 404 returned error can't find the container with id 37749a388af3fe75808e32b53c802121d0418398264c347db19cd7e447f8eb79 Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 12:42:25.263404 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerStarted","Data":"4632f57db01bec3d6340888e1adfc82b4bcf7e64c8604bc1131c96bbfcfdcf04"} Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 
12:42:25.264554 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" event={"ID":"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48","Type":"ContainerStarted","Data":"37749a388af3fe75808e32b53c802121d0418398264c347db19cd7e447f8eb79"}
Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 12:42:25.265464 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" event={"ID":"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12","Type":"ContainerStarted","Data":"5d5b375bd24d6480ae3528cbf1565bad4078136f623fb7a558b16011e87f9344"}
Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 12:42:25.266444 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerStarted","Data":"1dd17cd630324fa8770b1b4777a03fa96ef8436d76907acfd9b943cdd82f0c61"}
Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 12:42:25.267319 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerStarted","Data":"4c4137206207c3709105afe4b05427bf34918fbfff9fa3e533771858f4375b94"}
Nov 25 12:42:25 crc kubenswrapper[4675]: I1125 12:42:25.268224 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerStarted","Data":"d6dca4306f8713f2d27bcd77bdbc65a71df5610e4b46b3b9b9421dfecfdc4c90"}
Nov 25 12:42:25 crc kubenswrapper[4675]: E1125 12:42:25.269041 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e"
Nov 25 12:42:26 crc kubenswrapper[4675]: I1125 12:42:26.275772 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerStarted","Data":"453e6d4296e086c49a158a0f22284e1b39a1d8e959848827c3acd6577191d1d4"}
Nov 25 12:42:26 crc kubenswrapper[4675]: E1125 12:42:26.276400 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e"
Nov 25 12:42:27 crc kubenswrapper[4675]: I1125 12:42:27.283112 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerStarted","Data":"b145b87c0d0d7b169a24f7984ad0383dda72c58b5b29c5835f6ff1163c491e4d"}
Nov 25 12:42:29 crc kubenswrapper[4675]: E1125 12:42:29.206203 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7"
Nov 25 12:42:29 crc kubenswrapper[4675]: I1125 12:42:29.299481 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerStarted","Data":"39327b9763ef7774cee6ad6e7e28c20f643af38b09ffa606f907f6158cb89532"}
Nov 25 12:42:29 crc kubenswrapper[4675]: I1125 12:42:29.301456 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerStarted","Data":"b691ac1f334f9c399e9efbbcbeedb8be4b1c2d899550b6d59b6bfbdf42fc0bde"}
Nov 25 12:42:29 crc kubenswrapper[4675]: I1125 12:42:29.303060 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerStarted","Data":"0279dc86d896749c7c861424d3a9544c1a53e5573098bae008e9d631ce0f9c6e"}
Nov 25 12:42:29 crc kubenswrapper[4675]: E1125 12:42:29.304135 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:6f630b256a17a0d40ec49bbf3bfbc65118e712cafea97fb0eee03dbc037d6bf8\\\"\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7"
Nov 25 12:42:29 crc kubenswrapper[4675]: E1125 12:42:29.870291 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178"
Nov 25 12:42:30 crc kubenswrapper[4675]: E1125 12:42:30.113939 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150"
Nov 25 12:42:30 crc kubenswrapper[4675]: E1125 12:42:30.234896 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f"
Nov 25 12:42:30 crc kubenswrapper[4675]: E1125 12:42:30.311310 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178"
Nov 25 12:42:30 crc kubenswrapper[4675]: E1125 12:42:30.311459 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150"
Nov 25 12:42:30 crc kubenswrapper[4675]: E1125 12:42:30.312115 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:bc58f62c7171e9c9216fdeafbd170917b638e6c3f842031ee254f1389c57a09e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f"
Nov 25 12:42:42 crc kubenswrapper[4675]: E1125 12:42:42.074716 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:d1fab4998e5f0faf94295eeaebfbf6801921d50497fbfc5331a888b207831486"
Nov 25 12:42:42 crc kubenswrapper[4675]: E1125 12:42:42.075390 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:d1fab4998e5f0faf94295eeaebfbf6801921d50497fbfc5331a888b207831486,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tz7s6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:43 crc kubenswrapper[4675]: I1125 12:42:43.662083 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:42:43 crc kubenswrapper[4675]: I1125 12:42:43.662139 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:42:45 crc kubenswrapper[4675]: E1125 12:42:45.619089 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b"
Nov 25 12:42:45 crc kubenswrapper[4675]: E1125 12:42:45.619924 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-27wsx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-77868f484-xxcn9_openstack-operators(bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:46 crc kubenswrapper[4675]: E1125 12:42:46.619637 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:8aaaf8bb0a81358ee196af922d534c9b3f6bb47b27f4283087f7e0254638a671"
Nov 25 12:42:46 crc kubenswrapper[4675]: E1125 12:42:46.619853 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:8aaaf8bb0a81358ee196af922d534c9b3f6bb47b27f4283087f7e0254638a671,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5n5ts,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:48 crc kubenswrapper[4675]: E1125 12:42:48.055650 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57"
Nov 25 12:42:48 crc kubenswrapper[4675]: E1125 12:42:48.055881 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wfzvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:48 crc kubenswrapper[4675]: E1125 12:42:48.506357 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d"
Nov 25 12:42:48 crc kubenswrapper[4675]: E1125 12:42:48.506797 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ht9cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:50 crc kubenswrapper[4675]: E1125 12:42:50.191509 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:15f40442c6369e89c7d9b9cd82c28f10bbaddbef566f34c9597ca9ffaf6a5ea5"
Nov 25 12:42:50 crc kubenswrapper[4675]: E1125 12:42:50.191680 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:15f40442c6369e89c7d9b9cd82c28f10bbaddbef566f34c9597ca9ffaf6a5ea5,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h6xnc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:50 crc kubenswrapper[4675]: E1125 12:42:50.876891 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:e7347bb6a14d0e276a23b13cab8f960920777e71f31060edd4d15ee8f051797a"
Nov 25 12:42:50 crc kubenswrapper[4675]: E1125 12:42:50.877443 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:e7347bb6a14d0e276a23b13cab8f960920777e71f31060edd4d15ee8f051797a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7j2k7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:51 crc kubenswrapper[4675]: E1125 12:42:51.354543 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:624b77b1b44f5e72a6c7d5910b04eb8070c499f83dcf364fb9dc5f2f8cb83c85"
Nov 25 12:42:51 crc kubenswrapper[4675]: E1125 12:42:51.354715 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:624b77b1b44f5e72a6c7d5910b04eb8070c499f83dcf364fb9dc5f2f8cb83c85,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gjmkj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-77db6bf9c-rkgfz_openstack-operators(2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:51 crc kubenswrapper[4675]: E1125 12:42:51.917703 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:44c6dcec0d489a675c35e097d92729162bfc2a8cac62d7c8376943ef922e2651"
Nov 25 12:42:51 crc kubenswrapper[4675]: E1125 12:42:51.918276 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:44c6dcec0d489a675c35e097d92729162bfc2a8cac62d7c8376943ef922e2651,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-knhzt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:52 crc kubenswrapper[4675]: E1125 12:42:52.394034 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:f657fa5fddbe0d7cdf889002981a743e421cfbcfb396ec38013aa511596f45ef"
Nov 25 12:42:52 crc kubenswrapper[4675]: E1125 12:42:52.394192 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:f657fa5fddbe0d7cdf889002981a743e421cfbcfb396ec38013aa511596f45ef,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z2cqn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:52 crc kubenswrapper[4675]: E1125 12:42:52.953277 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:76ad3ddd8c89748b1d9a5f3a0b2f0f47494cdb62e2997610de7febcb12970635"
Nov 25 12:42:52 crc kubenswrapper[4675]: E1125 12:42:52.953725 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:76ad3ddd8c89748b1d9a5f3a0b2f0f47494cdb62e2997610de7febcb12970635,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jj8fj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:53 crc kubenswrapper[4675]: E1125 12:42:53.486033 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:7477e2fea70c83cfca71e1ece83bc6fdab55e890db711b0110817a5afd97c591"
Nov 25 12:42:53 crc kubenswrapper[4675]: E1125 12:42:53.486269 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7477e2fea70c83cfca71e1ece83bc6fdab55e890db711b0110817a5afd97c591,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g8gnq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:42:53 crc kubenswrapper[4675]: E1125 12:42:53.873130 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a"
Nov 25 12:42:53 crc kubenswrapper[4675]: E1125 12:42:53.873333 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n2j8g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:43:00 crc kubenswrapper[4675]: E1125 12:43:00.193747 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:6f630b256a17a0d40ec49bbf3bfbc65118e712cafea97fb0eee03dbc037d6bf8"
Nov 25 12:43:00 crc kubenswrapper[4675]: E1125 12:43:00.194447 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:6f630b256a17a0d40ec49bbf3bfbc65118e712cafea97fb0eee03dbc037d6bf8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kndlx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:43:00 crc kubenswrapper[4675]: E1125 12:43:00.195984 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.612234 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.612795 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8z4fj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_openstack-operators(64b432ef-6de9-4d8d-84ce-78f2097bf31e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.614373 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.745408 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.138:5001/openstack-k8s-operators/barbican-operator:71e80361a42941d3e4ed3b11c977f7c7ff974649"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.745466 4675 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.138:5001/openstack-k8s-operators/barbican-operator:71e80361a42941d3e4ed3b11c977f7c7ff974649"
Nov 25 12:43:03 crc kubenswrapper[4675]: E1125 12:43:03.745608 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.138:5001/openstack-k8s-operators/barbican-operator:71e80361a42941d3e4ed3b11c977f7c7ff974649,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gjbcv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.259012 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.268920 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.269078 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.301475 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.384548 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.405422 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.421968 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.446504 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.450394 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.451050 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.479559 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" podUID="bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.499628 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" podUID="2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.553628 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerStarted","Data":"7ef2e3e73361ad76c4ff1643612e8487f21b24924746e394047febb9c0c092d8"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.580703 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerStarted","Data":"7ba9db2a5c2633506eb0837de11caec2bf9a9c43b979dab27cd0aea75ed9f746"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.586436 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" event={"ID":"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12","Type":"ContainerStarted","Data":"a38f1d4bbab7814165f341105989159e015ece0f8251e5a3c6970a57617d3c2e"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.626395 4675 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerStarted","Data":"8f657c44b138f2f212bb851cc2ddcc13d1c7e6b111f3d6d1b365b9fa1ae17bbd"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.626997 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.658448 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerStarted","Data":"55146adbbcdb3f3bf90fcc3100ba2b57d12511164f1f7feceb5b5d0e917350fa"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.665371 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerStarted","Data":"edec7a372e00bcd5417ed5057b235c9e190888ea4dc0319e42a154a15c7d6677"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.668322 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerStarted","Data":"140c8f23fe321962ed389be9244bbb312d4f9977c1df359cd469b4616364c1fb"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.669976 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerStarted","Data":"87541648b576f94c4315aaf772d4e3c3ab7315e9b9850945db55b5d0806992c8"} Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.670741 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.671885 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.678963 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" event={"ID":"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48","Type":"ContainerStarted","Data":"b699a604e09d06d079894f0fbb1df66eea6cfee28bf2bfe6b4d35c4cafda3fda"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.689699 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerStarted","Data":"b832cb3d8e223f8405e19c710546e9945b71427e23ba75f0b48d1f47ee817ae4"} Nov 25 12:43:04 crc kubenswrapper[4675]: E1125 12:43:04.698127 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: 
\"Back-off pulling image \\\"38.102.83.138:5001/openstack-k8s-operators/barbican-operator:71e80361a42941d3e4ed3b11c977f7c7ff974649\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.704324 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerStarted","Data":"974aa3462e9cda69134179b7906911f488044f8e81e6f2b8ba4712df18b0a6ab"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.704987 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.706286 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerStarted","Data":"c6104884d5899b5cd5a6819ed021406521cc03bdb8383632afc3f4bdf2c3fc75"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.707669 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerStarted","Data":"0ced35a4cfcc7c09f6e43b2b96dfc7622f7b4cd75e77f2a71654181427fe17dd"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.710510 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"8b3ad283248d307ce17904cd42a52c836d75caa6a84c23f6094ea278c2b4023f"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.724391 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerStarted","Data":"81a79b7ea50f14cd19d60492c205f07cb67116697f8d963847b64b764b5864b1"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.793764 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerStarted","Data":"d341685012329cf60dad004a3d06fdac1ee9a567df53723e123a096aebf13959"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.794696 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.815582 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerStarted","Data":"e4f4af599c925ce776a9fcf054ac7fa3862a26183edd48b06212e05ade7d8115"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.831691 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerStarted","Data":"8f6e85c04ca69e011aac79fe57f07e0a4c9af3b3dd619b7a81c8e6e179bf3e56"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.847052 4675 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerStarted","Data":"a6bbf4a6e19d4933e013f93be2194caabc87f80595074a807af5e1de38145ce7"} Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.847932 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:43:04 crc kubenswrapper[4675]: I1125 12:43:04.868309 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.040640 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podStartSLOduration=4.500097724 podStartE2EDuration="44.04062457s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.369090042 +0000 UTC m=+889.540682383" lastFinishedPulling="2025-11-25 12:43:03.909616888 +0000 UTC m=+929.081209229" observedRunningTime="2025-11-25 12:43:05.005541554 +0000 UTC m=+930.177133925" watchObservedRunningTime="2025-11-25 12:43:05.04062457 +0000 UTC m=+930.212216911" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.186124 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podStartSLOduration=4.471501069 podStartE2EDuration="44.186103164s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.101807297 +0000 UTC m=+889.273399638" lastFinishedPulling="2025-11-25 12:43:03.816409402 +0000 UTC m=+928.988001733" observedRunningTime="2025-11-25 12:43:05.179834669 +0000 UTC m=+930.351427030" watchObservedRunningTime="2025-11-25 12:43:05.186103164 +0000 UTC m=+930.357695505" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.236356 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podStartSLOduration=43.236331595 podStartE2EDuration="43.236331595s" podCreationTimestamp="2025-11-25 12:42:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:43:05.232913263 +0000 UTC m=+930.404505614" watchObservedRunningTime="2025-11-25 12:43:05.236331595 +0000 UTC m=+930.407923936" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.416866 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podStartSLOduration=4.799008862 podStartE2EDuration="44.416850535s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.10343098 +0000 UTC m=+889.275023321" lastFinishedPulling="2025-11-25 12:43:03.721272633 +0000 UTC m=+928.892864994" observedRunningTime="2025-11-25 12:43:05.375839574 +0000 UTC m=+930.547431915" watchObservedRunningTime="2025-11-25 12:43:05.416850535 +0000 UTC m=+930.588442866" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.866903 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" 
event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerStarted","Data":"65b1b00d688e995573485483c0999de06527a83ee65476aaeb02336a9693a84d"} Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.867804 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.876787 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerStarted","Data":"3fd273f137e8ecc7ac272339676812ae9aad264f50ada69b32dfad3a4692de7d"} Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.876939 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.877986 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerStarted","Data":"3fd382a309c15226522f78949ff80e3243e567275b66bca7724d2651c4a32fc2"} Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.879067 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerStarted","Data":"584b40754ae15f75ac9717c50f99f397eda535d94b5c9deb1187b18400c949fe"} Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.884756 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerStarted","Data":"b8085954e540add73e989a0e26b31d26e3f652c7d34a76e11e7d6b1b8a9b0d41"} Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.910719 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podStartSLOduration=13.50514027 podStartE2EDuration="44.910699024s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.095308345 +0000 UTC m=+889.266900686" lastFinishedPulling="2025-11-25 12:42:55.500867099 +0000 UTC m=+920.672459440" observedRunningTime="2025-11-25 12:43:05.896188199 +0000 UTC m=+931.067780570" watchObservedRunningTime="2025-11-25 12:43:05.910699024 +0000 UTC m=+931.082291365" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.939122 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podStartSLOduration=13.53481915 podStartE2EDuration="44.939108122s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.097178866 +0000 UTC m=+889.268771207" lastFinishedPulling="2025-11-25 12:42:55.501467838 +0000 UTC m=+920.673060179" observedRunningTime="2025-11-25 12:43:05.937467769 +0000 UTC m=+931.109060110" watchObservedRunningTime="2025-11-25 12:43:05.939108122 +0000 UTC m=+931.110700463" Nov 25 12:43:05 crc kubenswrapper[4675]: I1125 12:43:05.977934 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podStartSLOduration=12.975722368 
podStartE2EDuration="44.97790939s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:23.495157682 +0000 UTC m=+888.666750023" lastFinishedPulling="2025-11-25 12:42:55.497344704 +0000 UTC m=+920.668937045" observedRunningTime="2025-11-25 12:43:05.972636168 +0000 UTC m=+931.144228529" watchObservedRunningTime="2025-11-25 12:43:05.97790939 +0000 UTC m=+931.149501741" Nov 25 12:43:06 crc kubenswrapper[4675]: E1125 12:43:06.100134 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.138:5001/openstack-k8s-operators/barbican-operator:71e80361a42941d3e4ed3b11c977f7c7ff974649\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.892105 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" event={"ID":"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12","Type":"ContainerStarted","Data":"516b2b20641a13a4596239c08ebdfdb2fa77d0b3f2ebff526af35c2d30b1c886"} Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.892483 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.894481 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerStarted","Data":"795f466779587cb11748d192af45c1c5270921a968dd364bc26091a80cbd27b7"} Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.894514 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.895401 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.909680 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" podStartSLOduration=4.655865914 podStartE2EDuration="45.90966336s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.362280679 +0000 UTC m=+889.533873020" lastFinishedPulling="2025-11-25 12:43:05.616078125 +0000 UTC m=+930.787670466" observedRunningTime="2025-11-25 12:43:06.905551346 +0000 UTC m=+932.077143687" watchObservedRunningTime="2025-11-25 12:43:06.90966336 +0000 UTC m=+932.081255701" Nov 25 12:43:06 crc kubenswrapper[4675]: I1125 12:43:06.928315 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podStartSLOduration=4.393135239 podStartE2EDuration="45.92830176s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.079770897 +0000 UTC m=+889.251363238" lastFinishedPulling="2025-11-25 12:43:05.614937418 +0000 UTC m=+930.786529759" observedRunningTime="2025-11-25 12:43:06.923596535 +0000 UTC m=+932.095188876" watchObservedRunningTime="2025-11-25 12:43:06.92830176 +0000 UTC m=+932.099894091" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 
12:43:07.901241 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerStarted","Data":"061ecdf9aa2d0f61605f4accb5e6b453e9328d3716a9b60260095559670ee9c9"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.902079 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.903607 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerStarted","Data":"5fbfece02fe84e009a5d9924115a3d3f5a247d2e50ff39d22cfd53a44aa51915"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.904071 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.905862 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerStarted","Data":"d6f186a95189e2a38ebddc8a2c833e6b10afdb17812bf5904309989b130e47a1"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.906198 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.907832 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerStarted","Data":"6acc2abc8938e97f60eb5f959bb28d00e64d6631a975cd53e288956fa98825e4"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.908208 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.909916 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerStarted","Data":"45e4f3a968a2c8cab5ba3984005c94977c2d9e1a56138690e5a2607101e378ea"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.910307 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.912066 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerStarted","Data":"4738d8d88de0407838d6011e7e0aa3a8cb6b91f80e567b16bc0e02fb8324d0b7"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.912453 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.914135 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" 
event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerStarted","Data":"d983778d43e5dd18a742a7903be363e738ff496a4edc1825077bfc8e4e6ec8b1"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.914555 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.916329 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerStarted","Data":"990b012da81571e2297f728312722bde123011371a34a3b8401a7d084b744334"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.916867 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.918249 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"4aa80bec7c4faa4ccae0910a81ac4bd6fb51056a689f0a3703ab648a6b275892"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.918605 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.920163 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerStarted","Data":"3b3c2e64f3037f4fe2358136d3b2de543be088851ee8c98e61218aeb327ab3a5"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.920534 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.922687 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" event={"ID":"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48","Type":"ContainerStarted","Data":"8b0bd41d396f8608e02a965008b0e62af913967a6c272816d00de55040ec1e86"} Nov 25 12:43:07 crc kubenswrapper[4675]: I1125 12:43:07.922713 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.026887 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podStartSLOduration=4.598454646 podStartE2EDuration="47.0268646s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.081789552 +0000 UTC m=+889.253381893" lastFinishedPulling="2025-11-25 12:43:06.510199496 +0000 UTC m=+931.681791847" observedRunningTime="2025-11-25 12:43:08.019589423 +0000 UTC m=+933.191181764" watchObservedRunningTime="2025-11-25 12:43:08.0268646 +0000 UTC m=+933.198456941" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.279163 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podStartSLOduration=4.812320977 
podStartE2EDuration="47.279142785s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.042911032 +0000 UTC m=+889.214503363" lastFinishedPulling="2025-11-25 12:43:06.50973283 +0000 UTC m=+931.681325171" observedRunningTime="2025-11-25 12:43:08.178440424 +0000 UTC m=+933.350032765" watchObservedRunningTime="2025-11-25 12:43:08.279142785 +0000 UTC m=+933.450735126" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.282077 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podStartSLOduration=3.7793922909999997 podStartE2EDuration="47.282065881s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:23.006944316 +0000 UTC m=+888.178536657" lastFinishedPulling="2025-11-25 12:43:06.509617896 +0000 UTC m=+931.681210247" observedRunningTime="2025-11-25 12:43:08.279055122 +0000 UTC m=+933.450647473" watchObservedRunningTime="2025-11-25 12:43:08.282065881 +0000 UTC m=+933.453658222" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.336623 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podStartSLOduration=4.9085225900000005 podStartE2EDuration="47.336603983s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.080701267 +0000 UTC m=+889.252293608" lastFinishedPulling="2025-11-25 12:43:06.50878265 +0000 UTC m=+931.680375001" observedRunningTime="2025-11-25 12:43:08.322235424 +0000 UTC m=+933.493827775" watchObservedRunningTime="2025-11-25 12:43:08.336603983 +0000 UTC m=+933.508196324" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.418987 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podStartSLOduration=4.864269854 podStartE2EDuration="47.418969575s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:23.953654945 +0000 UTC m=+889.125247276" lastFinishedPulling="2025-11-25 12:43:06.508354656 +0000 UTC m=+931.679946997" observedRunningTime="2025-11-25 12:43:08.380713735 +0000 UTC m=+933.552306096" watchObservedRunningTime="2025-11-25 12:43:08.418969575 +0000 UTC m=+933.590561916" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.478478 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podStartSLOduration=3.774059277 podStartE2EDuration="47.478457439s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:23.258668433 +0000 UTC m=+888.430260774" lastFinishedPulling="2025-11-25 12:43:06.963066595 +0000 UTC m=+932.134658936" observedRunningTime="2025-11-25 12:43:08.418409406 +0000 UTC m=+933.590001757" watchObservedRunningTime="2025-11-25 12:43:08.478457439 +0000 UTC m=+933.650049780" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.479448 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podStartSLOduration=4.557287724 podStartE2EDuration="47.479441772s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.043231953 +0000 UTC m=+889.214824294" lastFinishedPulling="2025-11-25 12:43:06.965386001 +0000 UTC m=+932.136978342" 
observedRunningTime="2025-11-25 12:43:08.478627545 +0000 UTC m=+933.650219876" watchObservedRunningTime="2025-11-25 12:43:08.479441772 +0000 UTC m=+933.651034113" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.517833 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podStartSLOduration=5.026426475 podStartE2EDuration="47.517801865s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.016935074 +0000 UTC m=+889.188527415" lastFinishedPulling="2025-11-25 12:43:06.508310454 +0000 UTC m=+931.679902805" observedRunningTime="2025-11-25 12:43:08.50512807 +0000 UTC m=+933.676720441" watchObservedRunningTime="2025-11-25 12:43:08.517801865 +0000 UTC m=+933.689394206" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.543730 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podStartSLOduration=4.917922529 podStartE2EDuration="47.543710972s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.36261514 +0000 UTC m=+889.534207481" lastFinishedPulling="2025-11-25 12:43:06.988403583 +0000 UTC m=+932.159995924" observedRunningTime="2025-11-25 12:43:08.539225425 +0000 UTC m=+933.710817766" watchObservedRunningTime="2025-11-25 12:43:08.543710972 +0000 UTC m=+933.715303333" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.578047 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podStartSLOduration=4.647985777 podStartE2EDuration="47.578032113s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.054097257 +0000 UTC m=+889.225689599" lastFinishedPulling="2025-11-25 12:43:06.984143594 +0000 UTC m=+932.155735935" observedRunningTime="2025-11-25 12:43:08.564835292 +0000 UTC m=+933.736427643" watchObservedRunningTime="2025-11-25 12:43:08.578032113 +0000 UTC m=+933.749624454" Nov 25 12:43:08 crc kubenswrapper[4675]: I1125 12:43:08.634592 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" podStartSLOduration=5.371591164 podStartE2EDuration="47.63454933s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.700233063 +0000 UTC m=+889.871825414" lastFinishedPulling="2025-11-25 12:43:06.963191229 +0000 UTC m=+932.134783580" observedRunningTime="2025-11-25 12:43:08.626181787 +0000 UTC m=+933.797774138" watchObservedRunningTime="2025-11-25 12:43:08.63454933 +0000 UTC m=+933.806141671" Nov 25 12:43:11 crc kubenswrapper[4675]: I1125 12:43:11.784564 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.081874 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.148507 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.234301 4675 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.240515 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.282323 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.445298 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.513701 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.605238 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.617255 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.803388 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.889277 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 12:43:12 crc kubenswrapper[4675]: I1125 12:43:12.973557 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.662795 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.662895 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.662953 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.663746 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.663880 4675 kuberuntime_container.go:808] "Killing container with 
a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f" gracePeriod=600 Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.873043 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.967898 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f" exitCode=0 Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.968356 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f"} Nov 25 12:43:13 crc kubenswrapper[4675]: I1125 12:43:13.968435 4675 scope.go:117] "RemoveContainer" containerID="4b3c1e1ef52f16910a00a563d21fee3feb92e0dcf81ba47871ed8bc9505c87d4" Nov 25 12:43:14 crc kubenswrapper[4675]: I1125 12:43:14.975904 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54"} Nov 25 12:43:15 crc kubenswrapper[4675]: E1125 12:43:15.538674 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" Nov 25 12:43:15 crc kubenswrapper[4675]: E1125 12:43:15.539102 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:6f630b256a17a0d40ec49bbf3bfbc65118e712cafea97fb0eee03dbc037d6bf8\\\"\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7" Nov 25 12:43:17 crc kubenswrapper[4675]: I1125 12:43:17.534295 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 12:43:17 crc kubenswrapper[4675]: I1125 12:43:17.995744 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerStarted","Data":"5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e"} Nov 25 12:43:17 crc kubenswrapper[4675]: I1125 12:43:17.997078 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 12:43:18 crc kubenswrapper[4675]: I1125 12:43:18.021087 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podStartSLOduration=2.308383178 
podStartE2EDuration="57.021069015s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:23.007257037 +0000 UTC m=+888.178849378" lastFinishedPulling="2025-11-25 12:43:17.719942874 +0000 UTC m=+942.891535215" observedRunningTime="2025-11-25 12:43:18.014981926 +0000 UTC m=+943.186574267" watchObservedRunningTime="2025-11-25 12:43:18.021069015 +0000 UTC m=+943.192661356" Nov 25 12:43:21 crc kubenswrapper[4675]: I1125 12:43:21.561081 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 12:43:21 crc kubenswrapper[4675]: I1125 12:43:21.703187 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 12:43:21 crc kubenswrapper[4675]: I1125 12:43:21.943419 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 12:43:21 crc kubenswrapper[4675]: I1125 12:43:21.943511 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 12:43:21 crc kubenswrapper[4675]: I1125 12:43:21.994710 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 12:43:28 crc kubenswrapper[4675]: I1125 12:43:28.062881 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerStarted","Data":"02dfa88b12197fdadfd2aa144f0b9efa359284fda4897a2c6d10709625ce22d3"} Nov 25 12:43:28 crc kubenswrapper[4675]: I1125 12:43:28.063678 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 12:43:28 crc kubenswrapper[4675]: I1125 12:43:28.065699 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerStarted","Data":"bdddf947ea1a6bdc9dafd93f3f61d64e22cef53a665cc93ccb1d99fea80e52bf"} Nov 25 12:43:28 crc kubenswrapper[4675]: I1125 12:43:28.081477 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podStartSLOduration=3.34854951 podStartE2EDuration="1m7.081463271s" podCreationTimestamp="2025-11-25 12:42:21 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.099255353 +0000 UTC m=+889.270847694" lastFinishedPulling="2025-11-25 12:43:27.832169114 +0000 UTC m=+953.003761455" observedRunningTime="2025-11-25 12:43:28.076475509 +0000 UTC m=+953.248067850" watchObservedRunningTime="2025-11-25 12:43:28.081463271 +0000 UTC m=+953.253055612" Nov 25 12:43:28 crc kubenswrapper[4675]: I1125 12:43:28.091861 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podStartSLOduration=2.622908806 podStartE2EDuration="1m6.091854811s" podCreationTimestamp="2025-11-25 12:42:22 +0000 UTC" firstStartedPulling="2025-11-25 12:42:24.364074678 +0000 UTC m=+889.535667019" lastFinishedPulling="2025-11-25 12:43:27.833020683 +0000 UTC m=+953.004613024" 
observedRunningTime="2025-11-25 12:43:28.091437697 +0000 UTC m=+953.263030048" watchObservedRunningTime="2025-11-25 12:43:28.091854811 +0000 UTC m=+953.263447152" Nov 25 12:43:31 crc kubenswrapper[4675]: I1125 12:43:31.545659 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 12:43:41 crc kubenswrapper[4675]: I1125 12:43:41.842798 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.321750 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.323474 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.329912 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.329938 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.330277 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.330342 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-snq9n" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.349977 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.425926 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.427200 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.430012 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.465850 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z82xh\" (UniqueName: \"kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.465912 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.480915 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.566857 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z82xh\" (UniqueName: \"kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.566909 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.566945 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.567003 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckzj7\" (UniqueName: \"kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.567056 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.568290 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 
12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.587987 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z82xh\" (UniqueName: \"kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh\") pod \"dnsmasq-dns-675f4bcbfc-rr675\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.638908 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.668428 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.668701 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.668768 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckzj7\" (UniqueName: \"kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.671891 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.671961 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.686447 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckzj7\" (UniqueName: \"kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7\") pod \"dnsmasq-dns-78dd6ddcc-bsfkw\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:58 crc kubenswrapper[4675]: I1125 12:43:58.742947 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:43:59 crc kubenswrapper[4675]: I1125 12:43:59.099547 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:43:59 crc kubenswrapper[4675]: I1125 12:43:59.184621 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:43:59 crc kubenswrapper[4675]: W1125 12:43:59.194634 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36fb8e78_9c2b_4891_aa20_cdd6695e857f.slice/crio-611bab1be05d8051f16c474b73ee2de870b63b25cdc7706cd171c8877e0ca956 WatchSource:0}: Error finding container 611bab1be05d8051f16c474b73ee2de870b63b25cdc7706cd171c8877e0ca956: Status 404 returned error can't find the container with id 611bab1be05d8051f16c474b73ee2de870b63b25cdc7706cd171c8877e0ca956 Nov 25 12:43:59 crc kubenswrapper[4675]: I1125 12:43:59.273302 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" event={"ID":"36fb8e78-9c2b-4891-aa20-cdd6695e857f","Type":"ContainerStarted","Data":"611bab1be05d8051f16c474b73ee2de870b63b25cdc7706cd171c8877e0ca956"} Nov 25 12:43:59 crc kubenswrapper[4675]: I1125 12:43:59.274481 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" event={"ID":"54347346-7778-4546-98cd-e57f464f0bff","Type":"ContainerStarted","Data":"f1268d3fad6be7e275a325b395e1b6178382e589f848ad4c7fba6a63e281060b"} Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.688997 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.728377 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.731605 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.802026 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.802688 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.802724 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.802827 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vlw4\" (UniqueName: \"kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.906714 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.906757 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.906808 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vlw4\" (UniqueName: \"kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.907936 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.908060 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:00 crc kubenswrapper[4675]: I1125 12:44:00.935960 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vlw4\" (UniqueName: 
\"kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4\") pod \"dnsmasq-dns-666b6646f7-5lrf2\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.057304 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.151669 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.177859 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.179184 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.182705 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.315401 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.315787 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp4sn\" (UniqueName: \"kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.315880 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.421942 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.422027 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.422081 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp4sn\" (UniqueName: \"kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.423637 4675 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.424367 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.469506 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp4sn\" (UniqueName: \"kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn\") pod \"dnsmasq-dns-57d769cc4f-zqm4p\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.530539 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.782634 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:01 crc kubenswrapper[4675]: W1125 12:44:01.844173 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd81b8eb2_3df1_4aa8_82f6_67af08769677.slice/crio-9f34d02f8f00e23a317171c848abf6486950e5206a22a49fb7bd11db05ff4e69 WatchSource:0}: Error finding container 9f34d02f8f00e23a317171c848abf6486950e5206a22a49fb7bd11db05ff4e69: Status 404 returned error can't find the container with id 9f34d02f8f00e23a317171c848abf6486950e5206a22a49fb7bd11db05ff4e69 Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.946354 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.949646 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.956565 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.957018 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.965241 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xgbxq" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.965553 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.965714 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.966682 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.966882 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 12:44:01 crc kubenswrapper[4675]: I1125 12:44:01.967119 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043525 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043591 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043663 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043700 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043749 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.043784 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.044051 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.044133 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.044191 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.044215 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjt7z\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.044505 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.131596 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.146406 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.146647 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.149182 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.150650 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.150849 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.151390 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.153189 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.153345 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.153546 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.153719 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.154431 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.154581 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.155022 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.155157 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.155264 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjt7z\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.155426 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.155823 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.156191 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.159721 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.161206 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.164173 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.179230 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjt7z\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.220901 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") " pod="openstack/rabbitmq-server-0" Nov 25 
12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.311312 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.354864 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.358771 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.363443 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.363672 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.364064 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.364161 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.364298 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.364567 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6mcdr" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.364803 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.373664 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.407385 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" event={"ID":"b2df226c-65e0-4517-9e9c-dab78843423f","Type":"ContainerStarted","Data":"6ae9d7726a41601292c11d006ceb9ac5f46fd3ea22e5513adae6015a6a63da4c"} Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.411013 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" event={"ID":"d81b8eb2-3df1-4aa8-82f6-67af08769677","Type":"ContainerStarted","Data":"9f34d02f8f00e23a317171c848abf6486950e5206a22a49fb7bd11db05ff4e69"} Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459127 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459197 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459244 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459271 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459300 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459319 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd8gq\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459351 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459408 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459433 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459462 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.459490 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.561537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.561935 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.561975 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562012 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562054 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562091 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562139 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562780 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562838 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562865 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd8gq\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.562919 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.563159 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.563284 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.564440 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.567396 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.568301 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.568801 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.570625 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.571126 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.575275 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.577295 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.612206 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd8gq\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.633302 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.707601 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:44:02 crc kubenswrapper[4675]: I1125 12:44:02.926037 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.429341 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerStarted","Data":"436a587a89fafc68b79c485ef5fc56d0e6689bbc1f404f1d8f2d3acaae1f5c7b"} Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.587490 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.703462 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.708930 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.711351 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.712024 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-w5kl7" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.712274 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.712445 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.718786 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.719038 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.719324 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.801906 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802009 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-default\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802044 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-secrets\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802357 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx9q9\" (UniqueName: \"kubernetes.io/projected/3d992e9b-ee07-4194-90de-02816b3aec1e-kube-api-access-bx9q9\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802386 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-kolla-config\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802418 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc 
kubenswrapper[4675]: I1125 12:44:03.802453 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802624 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.802656 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905307 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905353 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905384 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905428 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-default\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905455 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-secrets\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905478 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx9q9\" (UniqueName: \"kubernetes.io/projected/3d992e9b-ee07-4194-90de-02816b3aec1e-kube-api-access-bx9q9\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905521 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-kolla-config\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905553 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.905587 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.907474 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.913324 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-secrets\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.919071 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.919561 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-kolla-config\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.919642 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.919957 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.920925 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3d992e9b-ee07-4194-90de-02816b3aec1e-config-data-default\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0" Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 
12:44:03.921512 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d992e9b-ee07-4194-90de-02816b3aec1e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0"
Nov 25 12:44:03 crc kubenswrapper[4675]: I1125 12:44:03.962738 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx9q9\" (UniqueName: \"kubernetes.io/projected/3d992e9b-ee07-4194-90de-02816b3aec1e-kube-api-access-bx9q9\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0"
Nov 25 12:44:04 crc kubenswrapper[4675]: I1125 12:44:04.001560 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"3d992e9b-ee07-4194-90de-02816b3aec1e\") " pod="openstack/openstack-galera-0"
Nov 25 12:44:04 crc kubenswrapper[4675]: I1125 12:44:04.051303 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 12:44:04 crc kubenswrapper[4675]: I1125 12:44:04.470704 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerStarted","Data":"2d0edc35a9d49ec84a82f7b5afcfd6d0e99c9756323145b1f5c77de414c037a7"}
Nov 25 12:44:04 crc kubenswrapper[4675]: I1125 12:44:04.888095 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.200061 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.203124 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
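
Note on the sequence above: local-storage07-crc is the only volume here that goes through two phases, MountVolume.MountDevice at its device mount path (/mnt/openstack/pv07) and then MountVolume.SetUp into openstack-galera-0, while the configmap, secret, empty-dir and projected volumes go straight to SetUp. A minimal stdlib-Go sketch that pairs the two phases when fed a journal with one entry per line (the file name volmounts.go and all helper names are illustrative, not from this log):

// volmounts.go - illustrative sketch, not part of this log: pairs the
// kubelet's two-phase volume events (MountDevice, then SetUp) as seen above.
// Assumes one journal entry per line and the exact klog quoting shown,
// where volume names appear as \"name\" inside the quoted message.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	// MountVolume.MountDevice succeeded for volume \"NAME\" ... device mount path \"PATH\"
	deviceRe = regexp.MustCompile(`MountVolume\.MountDevice succeeded for volume \\"([^"\\]+)\\".*device mount path \\"([^"\\]+)\\"`)
	// MountVolume.SetUp succeeded for volume \"NAME\" ... pod="NAMESPACE/POD"
	setupRe = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\".*pod="([^"]+)"`)
)

func main() {
	devicePath := map[string]string{} // volume name -> staged device mount path
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // journal lines can be long
	for sc.Scan() {
		line := sc.Text()
		if m := deviceRe.FindStringSubmatch(line); m != nil {
			devicePath[m[1]] = m[2] // phase 1 seen (local/block volumes only)
		}
		if m := setupRe.FindStringSubmatch(line); m != nil {
			if path, ok := devicePath[m[1]]; ok {
				fmt.Printf("two-phase  %s staged at %s (pod %s)\n", m[1], path, m[2])
			} else {
				fmt.Printf("setup-only %s (pod %s)\n", m[1], m[2])
			}
		}
	}
}

Run as: go run volmounts.go < kubelet.log. The regexes would need adjusting for any log format other than the klog output shown here.
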
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.208320 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.208423 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.208522 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.209578 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-rlth7"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.253785 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344235 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344316 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344361 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344420 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344444 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344465 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344483 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName:
\"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344506 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.344524 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk2b9\" (UniqueName: \"kubernetes.io/projected/c8756ef3-0fbe-457a-93ed-957baf6a60da-kube-api-access-qk2b9\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.399946 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.401378 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.405498 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-b4krb" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.405884 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.406042 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.406447 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449551 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449594 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449618 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449638 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc 
kubenswrapper[4675]: I1125 12:44:05.449662 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449680 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk2b9\" (UniqueName: \"kubernetes.io/projected/c8756ef3-0fbe-457a-93ed-957baf6a60da-kube-api-access-qk2b9\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449724 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449748 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.449765 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.450147 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.456091 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.457399 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.458500 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.459725 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c8756ef3-0fbe-457a-93ed-957baf6a60da-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.478843 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.486854 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk2b9\" (UniqueName: \"kubernetes.io/projected/c8756ef3-0fbe-457a-93ed-957baf6a60da-kube-api-access-qk2b9\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.487684 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.488425 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8756ef3-0fbe-457a-93ed-957baf6a60da-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.554727 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-config-data\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.554782 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4q8z\" (UniqueName: \"kubernetes.io/projected/6efda04f-52f8-48b1-9afd-f606c3a72d50-kube-api-access-d4q8z\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.554802 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.554843 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.554859 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-kolla-config\") pod \"memcached-0\" 
(UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.583158 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3d992e9b-ee07-4194-90de-02816b3aec1e","Type":"ContainerStarted","Data":"4a437b8a868b92f45f1c38b5cec3c892a16aa79d1764d0268918a3c9a41e15b6"} Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.588992 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c8756ef3-0fbe-457a-93ed-957baf6a60da\") " pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.657697 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4q8z\" (UniqueName: \"kubernetes.io/projected/6efda04f-52f8-48b1-9afd-f606c3a72d50-kube-api-access-d4q8z\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.657744 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.657797 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.657832 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-kolla-config\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.658027 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-config-data\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.659944 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-kolla-config\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.661728 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6efda04f-52f8-48b1-9afd-f606c3a72d50-config-data\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0" Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.664757 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " 
pod="openstack/memcached-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.666992 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6efda04f-52f8-48b1-9afd-f606c3a72d50-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.691552 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4q8z\" (UniqueName: \"kubernetes.io/projected/6efda04f-52f8-48b1-9afd-f606c3a72d50-kube-api-access-d4q8z\") pod \"memcached-0\" (UID: \"6efda04f-52f8-48b1-9afd-f606c3a72d50\") " pod="openstack/memcached-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.731042 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 12:44:05 crc kubenswrapper[4675]: I1125 12:44:05.845519 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 12:44:06 crc kubenswrapper[4675]: I1125 12:44:06.588723 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 12:44:06 crc kubenswrapper[4675]: W1125 12:44:06.602890 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6efda04f_52f8_48b1_9afd_f606c3a72d50.slice/crio-18ff65f75b4ec8378c9cb042b01f35663b197e1940f7a6400137c622fd145684 WatchSource:0}: Error finding container 18ff65f75b4ec8378c9cb042b01f35663b197e1940f7a6400137c622fd145684: Status 404 returned error can't find the container with id 18ff65f75b4ec8378c9cb042b01f35663b197e1940f7a6400137c622fd145684
Nov 25 12:44:06 crc kubenswrapper[4675]: I1125 12:44:06.706887 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 12:44:06 crc kubenswrapper[4675]: W1125 12:44:06.826996 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8756ef3_0fbe_457a_93ed_957baf6a60da.slice/crio-6566fad7fee5ead6d860143e51349439524f7a7adda1eff9c6132c94a41800ca WatchSource:0}: Error finding container 6566fad7fee5ead6d860143e51349439524f7a7adda1eff9c6132c94a41800ca: Status 404 returned error can't find the container with id 6566fad7fee5ead6d860143e51349439524f7a7adda1eff9c6132c94a41800ca
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.576854 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.578249 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.578348 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
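
The two W1125 manager.go:1169 warnings above look transient rather than actionable: the same container IDs (18ff65f7... and 6566fad7...) appear as ContainerStarted PLEG events within the following second, so the cadvisor watch loop most likely just raced the freshly created crio-*.slice cgroups. A small stdlib-Go sketch (illustrative tooling, not part of this log) that tallies journal entries by klog severity and source location, which makes recurring warning sites like this one easy to spot; it assumes the standard klog header layout (severity letter + MMDD, time, pid, file:line) and one entry per line:

// klogtally.go - illustrative sketch: counts kubelet journal entries by
// klog severity and source file:line, e.g. "W manager.go:1169".
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
)

// Standard klog header, e.g. "W1125 12:44:06.602890 4675 manager.go:1169] ..."
// Capture groups: severity letter, source file:line.
var headerRe = regexp.MustCompile(`([IWEF])\d{4} \d{2}:\d{2}:\d{2}\.\d{6} +\d+ ([\w.-]+:\d+)\]`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		if m := headerRe.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]+" "+m[2]]++
		}
	}
	// Print busiest log sites first.
	keys := make([]string, 0, len(counts))
	for k := range counts {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return counts[keys[i]] > counts[keys[j]] })
	for _, k := range keys {
		fmt.Printf("%6d  %s\n", counts[k], k)
	}
}

Since it matches at most one header per input line, it undercounts wherever entries arrive fused onto a single line, as parts of this capture did.
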
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.582235 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-tc796"
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.687220 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c8756ef3-0fbe-457a-93ed-957baf6a60da","Type":"ContainerStarted","Data":"6566fad7fee5ead6d860143e51349439524f7a7adda1eff9c6132c94a41800ca"}
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.695469 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6efda04f-52f8-48b1-9afd-f606c3a72d50","Type":"ContainerStarted","Data":"18ff65f75b4ec8378c9cb042b01f35663b197e1940f7a6400137c622fd145684"}
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.719769 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gb8x\" (UniqueName: \"kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x\") pod \"kube-state-metrics-0\" (UID: \"f8d262fe-fd03-43eb-a9d8-fb43896cf021\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.822050 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gb8x\" (UniqueName: \"kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x\") pod \"kube-state-metrics-0\" (UID: \"f8d262fe-fd03-43eb-a9d8-fb43896cf021\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.849428 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gb8x\" (UniqueName: \"kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x\") pod \"kube-state-metrics-0\" (UID: \"f8d262fe-fd03-43eb-a9d8-fb43896cf021\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:44:07 crc kubenswrapper[4675]: I1125 12:44:07.911768 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 12:44:08 crc kubenswrapper[4675]: I1125 12:44:08.772233 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:44:09 crc kubenswrapper[4675]: I1125 12:44:09.785359 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8d262fe-fd03-43eb-a9d8-fb43896cf021","Type":"ContainerStarted","Data":"a6c1281775c392defb4b452dec72494e5eab487fd72d60a75f740a06b75c11d3"}
Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.929012 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vgv4n"]
Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.930527 4675 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.932383 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.932627 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-9w8kl" Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.948298 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.965874 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n"] Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.992524 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-zx9pg"] Nov 25 12:44:10 crc kubenswrapper[4675]: I1125 12:44:10.994690 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007002 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6v4l\" (UniqueName: \"kubernetes.io/projected/738ca4c6-0239-497f-aa30-001f7a06bf41-kube-api-access-m6v4l\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007064 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-run\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007108 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007142 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007168 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-etc-ovs\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007196 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-lib\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007226 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws44s\" (UniqueName: \"kubernetes.io/projected/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-kube-api-access-ws44s\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007252 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-scripts\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007289 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-combined-ca-bundle\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007338 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/738ca4c6-0239-497f-aa30-001f7a06bf41-scripts\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007371 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-log-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007390 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-ovn-controller-tls-certs\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.007603 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-log\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.034846 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zx9pg"] Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108598 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-run\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108671 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 
12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108704 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108732 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-etc-ovs\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108759 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-lib\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108793 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws44s\" (UniqueName: \"kubernetes.io/projected/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-kube-api-access-ws44s\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108845 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-scripts\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108882 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-combined-ca-bundle\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108923 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/738ca4c6-0239-497f-aa30-001f7a06bf41-scripts\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108955 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-log-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.108977 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-ovn-controller-tls-certs\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.109006 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-log\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.109026 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6v4l\" (UniqueName: \"kubernetes.io/projected/738ca4c6-0239-497f-aa30-001f7a06bf41-kube-api-access-m6v4l\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.109186 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.109256 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-run\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.109418 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-log-ovn\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.110492 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-etc-ovs\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.110558 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-var-run\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.110653 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-log\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.110796 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/738ca4c6-0239-497f-aa30-001f7a06bf41-var-lib\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.113301 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/738ca4c6-0239-497f-aa30-001f7a06bf41-scripts\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.116459 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-ovn-controller-tls-certs\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.118671 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-combined-ca-bundle\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.129883 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws44s\" (UniqueName: \"kubernetes.io/projected/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-kube-api-access-ws44s\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.134472 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6v4l\" (UniqueName: \"kubernetes.io/projected/738ca4c6-0239-497f-aa30-001f7a06bf41-kube-api-access-m6v4l\") pod \"ovn-controller-ovs-zx9pg\" (UID: \"738ca4c6-0239-497f-aa30-001f7a06bf41\") " pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.188661 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd0994da-34e6-4f4c-b8a5-cae4c7923df7-scripts\") pod \"ovn-controller-vgv4n\" (UID: \"bd0994da-34e6-4f4c-b8a5-cae4c7923df7\") " pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.270407 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.321708 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.813168 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.814454 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.817416 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.817651 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.817804 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rmch4" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.817954 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.818057 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823276 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823317 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823366 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823410 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-config\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823480 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823502 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdlcz\" (UniqueName: \"kubernetes.io/projected/c215d8eb-d320-4245-8bdb-73b0d600ea49-kube-api-access-kdlcz\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823531 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " 
pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.823560 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.829226 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.924990 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925055 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-config\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925092 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925112 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdlcz\" (UniqueName: \"kubernetes.io/projected/c215d8eb-d320-4245-8bdb-73b0d600ea49-kube-api-access-kdlcz\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925391 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925418 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.925778 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.926782 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 
12:44:11.926852 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.926864 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-config\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.927958 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c215d8eb-d320-4245-8bdb-73b0d600ea49-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.928927 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.932181 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.933506 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.933576 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c215d8eb-d320-4245-8bdb-73b0d600ea49-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.944623 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdlcz\" (UniqueName: \"kubernetes.io/projected/c215d8eb-d320-4245-8bdb-73b0d600ea49-kube-api-access-kdlcz\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:11 crc kubenswrapper[4675]: I1125 12:44:11.963595 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c215d8eb-d320-4245-8bdb-73b0d600ea49\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 12:44:12 crc kubenswrapper[4675]: I1125 12:44:12.061448 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n"]
Nov 25 12:44:12 crc kubenswrapper[4675]: I1125 12:44:12.151729 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
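
Across all of the pods in this section, every volume walks the same three reconciler stages: operationExecutor.VerifyControllerAttachedVolume started, then operationExecutor.MountVolume started, then MountVolume.SetUp succeeded, with the extra MountVolume.MountDevice step for local PVs such as local-storage02-crc at /mnt/openstack/pv02 above. A stdlib-Go sketch of a triage helper that reports volumes which never reach SetUp, under the same one-entry-per-line and exact-message assumptions as the earlier sketches (volstate.go and its output format are hypothetical):

// volstate.go - illustrative triage sketch: tracks each pod/volume through
// the three reconciler stages logged above and prints any that stall
// before MountVolume.SetUp succeeds.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Stage marker, volume name (klog-escaped as \"name\"), and the pod="ns/name" suffix.
var stageRe = regexp.MustCompile(`(operationExecutor\.VerifyControllerAttachedVolume started|operationExecutor\.MountVolume started|MountVolume\.SetUp succeeded) for volume \\"([^"\\]+)\\".*pod="([^"]+)"`)

var rank = map[string]int{
	"operationExecutor.VerifyControllerAttachedVolume started": 1,
	"operationExecutor.MountVolume started":                    2,
	"MountVolume.SetUp succeeded":                              3,
}

func main() {
	stage := map[string]int{} // "pod/volume" -> furthest stage reached
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		if m := stageRe.FindStringSubmatch(sc.Text()); m != nil {
			if key := m[3] + "/" + m[2]; rank[m[1]] > stage[key] {
				stage[key] = rank[m[1]]
			}
		}
	}
	for key, r := range stage {
		if r < 3 {
			fmt.Printf("stalled before SetUp (stage %d): %s\n", r, key)
		}
	}
}

On a healthy startup like the one logged here, it should print nothing; a volume stuck at stage 1 or 2 is usually the first place to look when a pod hangs in ContainerCreating.
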
Nov 25 12:44:12 crc kubenswrapper[4675]: I1125 12:44:12.699617 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zx9pg"]
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.812039 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-68qlk"]
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.813054 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.816057 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.853297 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-68qlk"]
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.981552 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovn-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.981622 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovs-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.981710 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef75e87-29e7-4d11-9547-430df2247d7b-config\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.981753 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zlw5\" (UniqueName: \"kubernetes.io/projected/6ef75e87-29e7-4d11-9547-430df2247d7b-kube-api-access-2zlw5\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.981785 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-combined-ca-bundle\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:13 crc kubenswrapper[4675]: I1125 12:44:13.982203 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk"
Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084765 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName:
\"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovn-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084840 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovs-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084878 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef75e87-29e7-4d11-9547-430df2247d7b-config\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084905 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zlw5\" (UniqueName: \"kubernetes.io/projected/6ef75e87-29e7-4d11-9547-430df2247d7b-kube-api-access-2zlw5\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084929 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-combined-ca-bundle\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.084977 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.085254 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovs-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.085343 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6ef75e87-29e7-4d11-9547-430df2247d7b-ovn-rundir\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.086873 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef75e87-29e7-4d11-9547-430df2247d7b-config\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.090210 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-metrics-certs-tls-certs\") pod 
\"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.091656 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef75e87-29e7-4d11-9547-430df2247d7b-combined-ca-bundle\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.121285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zlw5\" (UniqueName: \"kubernetes.io/projected/6ef75e87-29e7-4d11-9547-430df2247d7b-kube-api-access-2zlw5\") pod \"ovn-controller-metrics-68qlk\" (UID: \"6ef75e87-29e7-4d11-9547-430df2247d7b\") " pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:14 crc kubenswrapper[4675]: I1125 12:44:14.145935 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-68qlk" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.051722 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.053624 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.055984 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.056397 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.056692 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hhzwc" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.056779 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.058953 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204294 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp2gq\" (UniqueName: \"kubernetes.io/projected/f36454cf-1208-4320-8a1d-8df0afad3983-kube-api-access-rp2gq\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204402 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204509 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204564 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-config\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204587 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.204603 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.205003 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.205914 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307006 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307073 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-config\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307107 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307129 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307213 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: 
\"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307263 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307291 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp2gq\" (UniqueName: \"kubernetes.io/projected/f36454cf-1208-4320-8a1d-8df0afad3983-kube-api-access-rp2gq\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.307335 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.308167 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.308446 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.308608 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-config\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.309534 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f36454cf-1208-4320-8a1d-8df0afad3983-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.313470 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.314306 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.317522 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f36454cf-1208-4320-8a1d-8df0afad3983-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.327375 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp2gq\" (UniqueName: \"kubernetes.io/projected/f36454cf-1208-4320-8a1d-8df0afad3983-kube-api-access-rp2gq\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.330619 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f36454cf-1208-4320-8a1d-8df0afad3983\") " pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:15 crc kubenswrapper[4675]: I1125 12:44:15.381709 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:16 crc kubenswrapper[4675]: W1125 12:44:16.629999 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod738ca4c6_0239_497f_aa30_001f7a06bf41.slice/crio-9e3c1a33e8b48100cd817c1a2d6ab7d54baaf1c6332c22e0bb016c87426f0c87 WatchSource:0}: Error finding container 9e3c1a33e8b48100cd817c1a2d6ab7d54baaf1c6332c22e0bb016c87426f0c87: Status 404 returned error can't find the container with id 9e3c1a33e8b48100cd817c1a2d6ab7d54baaf1c6332c22e0bb016c87426f0c87 Nov 25 12:44:16 crc kubenswrapper[4675]: I1125 12:44:16.854209 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zx9pg" event={"ID":"738ca4c6-0239-497f-aa30-001f7a06bf41","Type":"ContainerStarted","Data":"9e3c1a33e8b48100cd817c1a2d6ab7d54baaf1c6332c22e0bb016c87426f0c87"} Nov 25 12:44:16 crc kubenswrapper[4675]: I1125 12:44:16.855681 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n" event={"ID":"bd0994da-34e6-4f4c-b8a5-cae4c7923df7","Type":"ContainerStarted","Data":"2842ecd496a9dfdeab3d207ba34b0b2a28cc5257edac1e7d0e653b8675f7ab6d"} Nov 25 12:44:25 crc kubenswrapper[4675]: E1125 12:44:25.969620 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 25 12:44:25 crc kubenswrapper[4675]: E1125 12:44:25.970537 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qk2b9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(c8756ef3-0fbe-457a-93ed-957baf6a60da): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:25 crc kubenswrapper[4675]: E1125 12:44:25.971807 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="c8756ef3-0fbe-457a-93ed-957baf6a60da" Nov 25 12:44:26 crc kubenswrapper[4675]: E1125 12:44:26.924296 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="c8756ef3-0fbe-457a-93ed-957baf6a60da" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.034209 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 
12:44:27.034412 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cjt7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(24ebc7c8-8b87-487b-90cb-7c26a047b956): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.035607 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.046768 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 
12:44:27.046911 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.046948 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bx9q9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(3d992e9b-ee07-4194-90de-02816b3aec1e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.047064 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' 
/tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qd8gq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(ca5edac0-6e16-45e1-8d9e-8cec8479bd8b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.048162 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="3d992e9b-ee07-4194-90de-02816b3aec1e" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.048251 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.929485 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.931575 4675 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="3d992e9b-ee07-4194-90de-02816b3aec1e" Nov 25 12:44:27 crc kubenswrapper[4675]: E1125 12:44:27.931616 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" Nov 25 12:44:28 crc kubenswrapper[4675]: E1125 12:44:28.647012 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 25 12:44:28 crc kubenswrapper[4675]: E1125 12:44:28.647280 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n685h8h55fh56chc6h59bhd5h694h558h9dh8bh5cdhfch74h579h6fh5d8hc5hfch5bchc4h9bh556h658hb4h586hfh57chc6h545h67bh5c7q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d4q8z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(6efda04f-52f8-48b1-9afd-f606c3a72d50): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:28 crc kubenswrapper[4675]: E1125 12:44:28.649387 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="6efda04f-52f8-48b1-9afd-f606c3a72d50" Nov 25 12:44:28 crc kubenswrapper[4675]: E1125 12:44:28.935872 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="6efda04f-52f8-48b1-9afd-f606c3a72d50" Nov 25 12:44:29 crc kubenswrapper[4675]: I1125 12:44:29.132160 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 12:44:34 crc kubenswrapper[4675]: W1125 12:44:34.128273 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc215d8eb_d320_4245_8bdb_73b0d600ea49.slice/crio-89c6d8eb6b8da2d099bfba7daac4000aaf9c9c63231700ad567e880fd03d3418 WatchSource:0}: Error finding container 89c6d8eb6b8da2d099bfba7daac4000aaf9c9c63231700ad567e880fd03d3418: Status 404 returned error can't find the container with id 89c6d8eb6b8da2d099bfba7daac4000aaf9c9c63231700ad567e880fd03d3418 Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.927080 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.927272 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5vlw4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-5lrf2_openstack(d81b8eb2-3df1-4aa8-82f6-67af08769677): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.928471 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" podUID="d81b8eb2-3df1-4aa8-82f6-67af08769677" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.937716 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.937867 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pp4sn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-zqm4p_openstack(b2df226c-65e0-4517-9e9c-dab78843423f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.938959 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" podUID="b2df226c-65e0-4517-9e9c-dab78843423f" Nov 25 12:44:34 crc kubenswrapper[4675]: I1125 12:44:34.973551 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c215d8eb-d320-4245-8bdb-73b0d600ea49","Type":"ContainerStarted","Data":"89c6d8eb6b8da2d099bfba7daac4000aaf9c9c63231700ad567e880fd03d3418"} Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.975400 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" podUID="b2df226c-65e0-4517-9e9c-dab78843423f" Nov 25 12:44:34 crc kubenswrapper[4675]: E1125 12:44:34.975601 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" podUID="d81b8eb2-3df1-4aa8-82f6-67af08769677" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.120964 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.121373 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ckzj7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-bsfkw_openstack(36fb8e78-9c2b-4891-aa20-cdd6695e857f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.122654 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" podUID="36fb8e78-9c2b-4891-aa20-cdd6695e857f" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.128631 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.128825 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 
5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z82xh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-rr675_openstack(54347346-7778-4546-98cd-e57f464f0bff): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:44:35 crc kubenswrapper[4675]: E1125 12:44:35.130521 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" podUID="54347346-7778-4546-98cd-e57f464f0bff" Nov 25 12:44:35 crc kubenswrapper[4675]: I1125 12:44:35.494948 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-68qlk"] Nov 25 12:44:35 crc kubenswrapper[4675]: I1125 12:44:35.858161 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 12:44:35 crc kubenswrapper[4675]: W1125 12:44:35.937677 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf36454cf_1208_4320_8a1d_8df0afad3983.slice/crio-1fd474313f047caf29fee6f72fdd4a4859b33328ecb35be9f154631796a56653 WatchSource:0}: Error finding container 1fd474313f047caf29fee6f72fdd4a4859b33328ecb35be9f154631796a56653: Status 404 returned error can't find the container with id 1fd474313f047caf29fee6f72fdd4a4859b33328ecb35be9f154631796a56653 Nov 25 12:44:35 crc kubenswrapper[4675]: I1125 12:44:35.980202 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f36454cf-1208-4320-8a1d-8df0afad3983","Type":"ContainerStarted","Data":"1fd474313f047caf29fee6f72fdd4a4859b33328ecb35be9f154631796a56653"} Nov 25 12:44:35 crc kubenswrapper[4675]: I1125 12:44:35.981713 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-68qlk" 
event={"ID":"6ef75e87-29e7-4d11-9547-430df2247d7b","Type":"ContainerStarted","Data":"3958c557156587bf480a97e275c8567200ebc10788a7629b74ed591f6ffc4741"} Nov 25 12:44:36 crc kubenswrapper[4675]: E1125 12:44:36.793248 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 25 12:44:36 crc kubenswrapper[4675]: E1125 12:44:36.793765 4675 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 25 12:44:36 crc kubenswrapper[4675]: E1125 12:44:36.793917 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5gb8x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(f8d262fe-fd03-43eb-a9d8-fb43896cf021): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 12:44:36 crc kubenswrapper[4675]: E1125 12:44:36.795446 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.807650 4675 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.812205 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.883551 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z82xh\" (UniqueName: \"kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh\") pod \"54347346-7778-4546-98cd-e57f464f0bff\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.883766 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config\") pod \"54347346-7778-4546-98cd-e57f464f0bff\" (UID: \"54347346-7778-4546-98cd-e57f464f0bff\") " Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.883831 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config\") pod \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.883860 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc\") pod \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.883974 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckzj7\" (UniqueName: \"kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7\") pod \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\" (UID: \"36fb8e78-9c2b-4891-aa20-cdd6695e857f\") " Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.884370 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config" (OuterVolumeSpecName: "config") pod "54347346-7778-4546-98cd-e57f464f0bff" (UID: "54347346-7778-4546-98cd-e57f464f0bff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.884469 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "36fb8e78-9c2b-4891-aa20-cdd6695e857f" (UID: "36fb8e78-9c2b-4891-aa20-cdd6695e857f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.884577 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config" (OuterVolumeSpecName: "config") pod "36fb8e78-9c2b-4891-aa20-cdd6695e857f" (UID: "36fb8e78-9c2b-4891-aa20-cdd6695e857f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.892865 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7" (OuterVolumeSpecName: "kube-api-access-ckzj7") pod "36fb8e78-9c2b-4891-aa20-cdd6695e857f" (UID: "36fb8e78-9c2b-4891-aa20-cdd6695e857f"). InnerVolumeSpecName "kube-api-access-ckzj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.893570 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh" (OuterVolumeSpecName: "kube-api-access-z82xh") pod "54347346-7778-4546-98cd-e57f464f0bff" (UID: "54347346-7778-4546-98cd-e57f464f0bff"). InnerVolumeSpecName "kube-api-access-z82xh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.984945 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54347346-7778-4546-98cd-e57f464f0bff-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.984970 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.984980 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36fb8e78-9c2b-4891-aa20-cdd6695e857f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.984989 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckzj7\" (UniqueName: \"kubernetes.io/projected/36fb8e78-9c2b-4891-aa20-cdd6695e857f-kube-api-access-ckzj7\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.984999 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z82xh\" (UniqueName: \"kubernetes.io/projected/54347346-7778-4546-98cd-e57f464f0bff-kube-api-access-z82xh\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.988545 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" event={"ID":"36fb8e78-9c2b-4891-aa20-cdd6695e857f","Type":"ContainerDied","Data":"611bab1be05d8051f16c474b73ee2de870b63b25cdc7706cd171c8877e0ca956"} Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.988841 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsfkw" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.989717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" event={"ID":"54347346-7778-4546-98cd-e57f464f0bff","Type":"ContainerDied","Data":"f1268d3fad6be7e275a325b395e1b6178382e589f848ad4c7fba6a63e281060b"} Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.989803 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rr675" Nov 25 12:44:36 crc kubenswrapper[4675]: I1125 12:44:36.993243 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c215d8eb-d320-4245-8bdb-73b0d600ea49","Type":"ContainerStarted","Data":"bb397aa57fa7867c9eddc2690ff56f0f1c4cb8ccbc906f74c524afa94983ec02"} Nov 25 12:44:36 crc kubenswrapper[4675]: E1125 12:44:36.996610 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.082462 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.100979 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rr675"] Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.113423 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.117113 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsfkw"] Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.544982 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36fb8e78-9c2b-4891-aa20-cdd6695e857f" path="/var/lib/kubelet/pods/36fb8e78-9c2b-4891-aa20-cdd6695e857f/volumes" Nov 25 12:44:37 crc kubenswrapper[4675]: I1125 12:44:37.545700 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54347346-7778-4546-98cd-e57f464f0bff" path="/var/lib/kubelet/pods/54347346-7778-4546-98cd-e57f464f0bff/volumes" Nov 25 12:44:38 crc kubenswrapper[4675]: I1125 12:44:38.001943 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n" event={"ID":"bd0994da-34e6-4f4c-b8a5-cae4c7923df7","Type":"ContainerStarted","Data":"0ae29320e8ec97eec7b72662f93910c51f4127906a9a3024538e7ae7e7ce964a"} Nov 25 12:44:38 crc kubenswrapper[4675]: I1125 12:44:38.002078 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-vgv4n" Nov 25 12:44:38 crc kubenswrapper[4675]: I1125 12:44:38.005086 4675 generic.go:334] "Generic (PLEG): container finished" podID="738ca4c6-0239-497f-aa30-001f7a06bf41" containerID="52c322c2488b056ef3014b73ef7d24bb63b148ef847f3133d373ede6980b833e" exitCode=0 Nov 25 12:44:38 crc kubenswrapper[4675]: I1125 12:44:38.005124 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zx9pg" event={"ID":"738ca4c6-0239-497f-aa30-001f7a06bf41","Type":"ContainerDied","Data":"52c322c2488b056ef3014b73ef7d24bb63b148ef847f3133d373ede6980b833e"} Nov 25 12:44:38 crc kubenswrapper[4675]: I1125 12:44:38.022325 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-vgv4n" podStartSLOduration=7.899000095 podStartE2EDuration="28.022310377s" podCreationTimestamp="2025-11-25 12:44:10 +0000 UTC" firstStartedPulling="2025-11-25 12:44:16.083521098 +0000 UTC m=+1001.255113439" lastFinishedPulling="2025-11-25 12:44:36.20683138 +0000 UTC m=+1021.378423721" observedRunningTime="2025-11-25 12:44:38.019649192 +0000 UTC m=+1023.191241533" watchObservedRunningTime="2025-11-25 12:44:38.022310377 +0000 UTC 
m=+1023.193902708" Nov 25 12:44:40 crc kubenswrapper[4675]: I1125 12:44:40.023730 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zx9pg" event={"ID":"738ca4c6-0239-497f-aa30-001f7a06bf41","Type":"ContainerStarted","Data":"3ee7a10f1d4be922702ea1a52f9b5b528029ca14e2db1175fefe874f569890ce"} Nov 25 12:44:40 crc kubenswrapper[4675]: I1125 12:44:40.024150 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:40 crc kubenswrapper[4675]: I1125 12:44:40.024168 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zx9pg" event={"ID":"738ca4c6-0239-497f-aa30-001f7a06bf41","Type":"ContainerStarted","Data":"ad554965138cc966abb8ccd1eacc67f862ef15738c9967d0a7bacf7b0280ee2f"} Nov 25 12:44:40 crc kubenswrapper[4675]: I1125 12:44:40.047503 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-zx9pg" podStartSLOduration=10.752506076 podStartE2EDuration="30.047481044s" podCreationTimestamp="2025-11-25 12:44:10 +0000 UTC" firstStartedPulling="2025-11-25 12:44:16.632914869 +0000 UTC m=+1001.804507210" lastFinishedPulling="2025-11-25 12:44:35.927889837 +0000 UTC m=+1021.099482178" observedRunningTime="2025-11-25 12:44:40.040289102 +0000 UTC m=+1025.211881443" watchObservedRunningTime="2025-11-25 12:44:40.047481044 +0000 UTC m=+1025.219073385" Nov 25 12:44:41 crc kubenswrapper[4675]: I1125 12:44:41.034739 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f36454cf-1208-4320-8a1d-8df0afad3983","Type":"ContainerStarted","Data":"52a6afb00979544b6affb32fbc18b8f74f137b7b2ae508f4666ca153ef2291ec"} Nov 25 12:44:41 crc kubenswrapper[4675]: I1125 12:44:41.035376 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:44:42 crc kubenswrapper[4675]: I1125 12:44:42.044983 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3d992e9b-ee07-4194-90de-02816b3aec1e","Type":"ContainerStarted","Data":"52acd98f6f12848684714059c5942ef85ef33bd5d11095162fdf779aedd903c4"} Nov 25 12:44:42 crc kubenswrapper[4675]: I1125 12:44:42.047473 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerStarted","Data":"9172f56922c897a590a4b50e195c278eae03e0cde37d71e1e27ae75d11b847eb"} Nov 25 12:44:42 crc kubenswrapper[4675]: I1125 12:44:42.049120 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c8756ef3-0fbe-457a-93ed-957baf6a60da","Type":"ContainerStarted","Data":"313f271c125b5fd6fe3ddd9bb050c0fd9a24da210d0b334d3f1ecb09b02728ff"} Nov 25 12:44:42 crc kubenswrapper[4675]: I1125 12:44:42.051447 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerStarted","Data":"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.075747 4675 generic.go:334] "Generic (PLEG): container finished" podID="3d992e9b-ee07-4194-90de-02816b3aec1e" containerID="52acd98f6f12848684714059c5942ef85ef33bd5d11095162fdf779aedd903c4" exitCode=0 Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.075868 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"3d992e9b-ee07-4194-90de-02816b3aec1e","Type":"ContainerDied","Data":"52acd98f6f12848684714059c5942ef85ef33bd5d11095162fdf779aedd903c4"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.079850 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c215d8eb-d320-4245-8bdb-73b0d600ea49","Type":"ContainerStarted","Data":"62f977d25337db6d068bc668581af0f9cef993c41585278cccaf4cae6168e54b"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.082691 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6efda04f-52f8-48b1-9afd-f606c3a72d50","Type":"ContainerStarted","Data":"62ef790eeeee48086db1df7831cf7d01ab5c8fc06919b977fe30849cd3e35eef"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.083145 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.085058 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f36454cf-1208-4320-8a1d-8df0afad3983","Type":"ContainerStarted","Data":"9ada81987e57ef996d7d5f03dd1524c1e2434eb547e8853166d2399d2d65ba8a"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.087324 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-68qlk" event={"ID":"6ef75e87-29e7-4d11-9547-430df2247d7b","Type":"ContainerStarted","Data":"b774df528a824f987a1863a569468872f1cb656141315eb26269f13cafe81bfd"} Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.130783 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.284613732 podStartE2EDuration="40.130760341s" podCreationTimestamp="2025-11-25 12:44:05 +0000 UTC" firstStartedPulling="2025-11-25 12:44:06.605007678 +0000 UTC m=+991.776600019" lastFinishedPulling="2025-11-25 12:44:44.451154287 +0000 UTC m=+1029.622746628" observedRunningTime="2025-11-25 12:44:45.122090813 +0000 UTC m=+1030.293683204" watchObservedRunningTime="2025-11-25 12:44:45.130760341 +0000 UTC m=+1030.302352712" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.150891 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=22.849074895 podStartE2EDuration="31.150868368s" podCreationTimestamp="2025-11-25 12:44:14 +0000 UTC" firstStartedPulling="2025-11-25 12:44:35.940447681 +0000 UTC m=+1021.112040022" lastFinishedPulling="2025-11-25 12:44:44.242241154 +0000 UTC m=+1029.413833495" observedRunningTime="2025-11-25 12:44:45.150487145 +0000 UTC m=+1030.322079576" watchObservedRunningTime="2025-11-25 12:44:45.150868368 +0000 UTC m=+1030.322460749" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.153021 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.210825 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-68qlk" podStartSLOduration=23.912695627 podStartE2EDuration="32.210795042s" podCreationTimestamp="2025-11-25 12:44:13 +0000 UTC" firstStartedPulling="2025-11-25 12:44:35.814072461 +0000 UTC m=+1020.985664802" lastFinishedPulling="2025-11-25 12:44:44.112171866 +0000 UTC m=+1029.283764217" observedRunningTime="2025-11-25 12:44:45.186891275 +0000 UTC m=+1030.358483646" watchObservedRunningTime="2025-11-25 12:44:45.210795042 +0000 UTC m=+1030.382387383" 
Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.227858 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=25.016335849 podStartE2EDuration="35.227809969s" podCreationTimestamp="2025-11-25 12:44:10 +0000 UTC" firstStartedPulling="2025-11-25 12:44:34.130297523 +0000 UTC m=+1019.301889864" lastFinishedPulling="2025-11-25 12:44:44.341771643 +0000 UTC m=+1029.513363984" observedRunningTime="2025-11-25 12:44:45.224037069 +0000 UTC m=+1030.395629430" watchObservedRunningTime="2025-11-25 12:44:45.227809969 +0000 UTC m=+1030.399402310" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.229478 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.381989 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.382319 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.433450 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.438695 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.496065 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.497285 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.503448 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.541631 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.596395 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.596740 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.596893 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.597043 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpj79\" (UniqueName: 
\"kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.700111 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.701212 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.701245 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.701270 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpj79\" (UniqueName: \"kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.701150 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.702186 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.702747 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.728486 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.749518 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpj79\" (UniqueName: \"kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79\") pod \"dnsmasq-dns-7fd796d7df-jpxg6\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.806296 4675 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.807997 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.810630 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.819841 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.850981 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.903807 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.903900 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.903925 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.903951 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgsnm\" (UniqueName: \"kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.903992 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:45 crc kubenswrapper[4675]: I1125 12:44:45.940952 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.007720 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.007761 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.007786 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgsnm\" (UniqueName: \"kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.007839 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.007926 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.009272 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.009767 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.009784 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.011104 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 
12:44:46.026771 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgsnm\" (UniqueName: \"kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm\") pod \"dnsmasq-dns-86db49b7ff-mbn5b\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.102615 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" event={"ID":"d81b8eb2-3df1-4aa8-82f6-67af08769677","Type":"ContainerDied","Data":"9f34d02f8f00e23a317171c848abf6486950e5206a22a49fb7bd11db05ff4e69"} Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.102702 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5lrf2" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.107050 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3d992e9b-ee07-4194-90de-02816b3aec1e","Type":"ContainerStarted","Data":"ae90d73b0ea166f6087a1c7a938fd31eea4d6f7d76d9ea2f842109e85602daf7"} Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.109118 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc\") pod \"d81b8eb2-3df1-4aa8-82f6-67af08769677\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.109165 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config\") pod \"d81b8eb2-3df1-4aa8-82f6-67af08769677\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.109230 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vlw4\" (UniqueName: \"kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4\") pod \"d81b8eb2-3df1-4aa8-82f6-67af08769677\" (UID: \"d81b8eb2-3df1-4aa8-82f6-67af08769677\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.111426 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d81b8eb2-3df1-4aa8-82f6-67af08769677" (UID: "d81b8eb2-3df1-4aa8-82f6-67af08769677"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.112051 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config" (OuterVolumeSpecName: "config") pod "d81b8eb2-3df1-4aa8-82f6-67af08769677" (UID: "d81b8eb2-3df1-4aa8-82f6-67af08769677"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.116950 4675 generic.go:334] "Generic (PLEG): container finished" podID="c8756ef3-0fbe-457a-93ed-957baf6a60da" containerID="313f271c125b5fd6fe3ddd9bb050c0fd9a24da210d0b334d3f1ecb09b02728ff" exitCode=0 Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.118410 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c8756ef3-0fbe-457a-93ed-957baf6a60da","Type":"ContainerDied","Data":"313f271c125b5fd6fe3ddd9bb050c0fd9a24da210d0b334d3f1ecb09b02728ff"} Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.118452 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.120956 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4" (OuterVolumeSpecName: "kube-api-access-5vlw4") pod "d81b8eb2-3df1-4aa8-82f6-67af08769677" (UID: "d81b8eb2-3df1-4aa8-82f6-67af08769677"). InnerVolumeSpecName "kube-api-access-5vlw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.154600 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.12035035 podStartE2EDuration="44.154534753s" podCreationTimestamp="2025-11-25 12:44:02 +0000 UTC" firstStartedPulling="2025-11-25 12:44:04.928135733 +0000 UTC m=+990.099728074" lastFinishedPulling="2025-11-25 12:44:40.962320136 +0000 UTC m=+1026.133912477" observedRunningTime="2025-11-25 12:44:46.145305658 +0000 UTC m=+1031.316898009" watchObservedRunningTime="2025-11-25 12:44:46.154534753 +0000 UTC m=+1031.326127114" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.159504 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.196151 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.203947 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.212090 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.212310 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d81b8eb2-3df1-4aa8-82f6-67af08769677-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.212417 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vlw4\" (UniqueName: \"kubernetes.io/projected/d81b8eb2-3df1-4aa8-82f6-67af08769677-kube-api-access-5vlw4\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.278193 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.313187 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp4sn\" (UniqueName: \"kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn\") pod \"b2df226c-65e0-4517-9e9c-dab78843423f\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.313301 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config\") pod \"b2df226c-65e0-4517-9e9c-dab78843423f\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.313331 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc\") pod \"b2df226c-65e0-4517-9e9c-dab78843423f\" (UID: \"b2df226c-65e0-4517-9e9c-dab78843423f\") " Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.315452 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b2df226c-65e0-4517-9e9c-dab78843423f" (UID: "b2df226c-65e0-4517-9e9c-dab78843423f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.317099 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config" (OuterVolumeSpecName: "config") pod "b2df226c-65e0-4517-9e9c-dab78843423f" (UID: "b2df226c-65e0-4517-9e9c-dab78843423f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.329025 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn" (OuterVolumeSpecName: "kube-api-access-pp4sn") pod "b2df226c-65e0-4517-9e9c-dab78843423f" (UID: "b2df226c-65e0-4517-9e9c-dab78843423f"). InnerVolumeSpecName "kube-api-access-pp4sn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.396888 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.415231 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp4sn\" (UniqueName: \"kubernetes.io/projected/b2df226c-65e0-4517-9e9c-dab78843423f-kube-api-access-pp4sn\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.415282 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.415295 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2df226c-65e0-4517-9e9c-dab78843423f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.474434 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.485956 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5lrf2"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.643710 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.668145 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.669399 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: W1125 12:44:46.672163 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00b2e92d_156b_43e4_8f42_b4fa7e9e468f.slice/crio-9b321a0fd00da867b49d28122233dfa868ea5ecafd9a1bc1114d437b1a5e1c48 WatchSource:0}: Error finding container 9b321a0fd00da867b49d28122233dfa868ea5ecafd9a1bc1114d437b1a5e1c48: Status 404 returned error can't find the container with id 9b321a0fd00da867b49d28122233dfa868ea5ecafd9a1bc1114d437b1a5e1c48 Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.683195 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.683504 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.683686 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.683831 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-6tx6c" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.683948 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720027 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " 
pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720089 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-scripts\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720150 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720170 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720185 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-config\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720208 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.720254 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc2hc\" (UniqueName: \"kubernetes.io/projected/d2d15dcc-e29e-4b04-8de0-911cc8190e33-kube-api-access-zc2hc\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822041 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822098 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822126 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-config\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822164 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822216 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc2hc\" (UniqueName: \"kubernetes.io/projected/d2d15dcc-e29e-4b04-8de0-911cc8190e33-kube-api-access-zc2hc\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822280 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.822310 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-scripts\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.823601 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-scripts\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.824277 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.825442 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2d15dcc-e29e-4b04-8de0-911cc8190e33-config\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.829639 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.829725 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.830257 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2d15dcc-e29e-4b04-8de0-911cc8190e33-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:46 crc kubenswrapper[4675]: I1125 12:44:46.841684 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc2hc\" (UniqueName: 
\"kubernetes.io/projected/d2d15dcc-e29e-4b04-8de0-911cc8190e33-kube-api-access-zc2hc\") pod \"ovn-northd-0\" (UID: \"d2d15dcc-e29e-4b04-8de0-911cc8190e33\") " pod="openstack/ovn-northd-0" Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.020854 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.126093 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c8756ef3-0fbe-457a-93ed-957baf6a60da","Type":"ContainerStarted","Data":"7d8ae0e2fd8ab703b7fee15fd05a909c5cb8123c28eccf68590067330b21f392"} Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.128246 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" event={"ID":"00b2e92d-156b-43e4-8f42-b4fa7e9e468f","Type":"ContainerStarted","Data":"9b321a0fd00da867b49d28122233dfa868ea5ecafd9a1bc1114d437b1a5e1c48"} Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.130438 4675 generic.go:334] "Generic (PLEG): container finished" podID="47599dee-e400-48de-a84e-781c9c2bc826" containerID="5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674" exitCode=0 Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.130486 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" event={"ID":"47599dee-e400-48de-a84e-781c9c2bc826","Type":"ContainerDied","Data":"5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674"} Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.130505 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" event={"ID":"47599dee-e400-48de-a84e-781c9c2bc826","Type":"ContainerStarted","Data":"ff55d4271b732daffb09252f975b0aef54b5bd41afcc5c09c89a0f13872d14d2"} Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.134176 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.134978 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-zqm4p" event={"ID":"b2df226c-65e0-4517-9e9c-dab78843423f","Type":"ContainerDied","Data":"6ae9d7726a41601292c11d006ceb9ac5f46fd3ea22e5513adae6015a6a63da4c"} Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.172323 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=9.037306469 podStartE2EDuration="43.172300803s" podCreationTimestamp="2025-11-25 12:44:04 +0000 UTC" firstStartedPulling="2025-11-25 12:44:06.830607427 +0000 UTC m=+992.002199768" lastFinishedPulling="2025-11-25 12:44:40.965601751 +0000 UTC m=+1026.137194102" observedRunningTime="2025-11-25 12:44:47.151233816 +0000 UTC m=+1032.322826167" watchObservedRunningTime="2025-11-25 12:44:47.172300803 +0000 UTC m=+1032.343893154" Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.307477 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.311631 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-zqm4p"] Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.318322 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 12:44:47 crc kubenswrapper[4675]: W1125 12:44:47.349974 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2d15dcc_e29e_4b04_8de0_911cc8190e33.slice/crio-4961521ca86eb304f41b241f4c2f31d910905524363bf9ac04a49b54df602604 WatchSource:0}: Error finding container 4961521ca86eb304f41b241f4c2f31d910905524363bf9ac04a49b54df602604: Status 404 returned error can't find the container with id 4961521ca86eb304f41b241f4c2f31d910905524363bf9ac04a49b54df602604 Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.543912 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2df226c-65e0-4517-9e9c-dab78843423f" path="/var/lib/kubelet/pods/b2df226c-65e0-4517-9e9c-dab78843423f/volumes" Nov 25 12:44:47 crc kubenswrapper[4675]: I1125 12:44:47.544347 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d81b8eb2-3df1-4aa8-82f6-67af08769677" path="/var/lib/kubelet/pods/d81b8eb2-3df1-4aa8-82f6-67af08769677/volumes" Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.153575 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d2d15dcc-e29e-4b04-8de0-911cc8190e33","Type":"ContainerStarted","Data":"4961521ca86eb304f41b241f4c2f31d910905524363bf9ac04a49b54df602604"} Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.157397 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" event={"ID":"47599dee-e400-48de-a84e-781c9c2bc826","Type":"ContainerStarted","Data":"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0"} Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.157512 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.160063 4675 generic.go:334] "Generic (PLEG): container finished" podID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" 
containerID="7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9" exitCode=0 Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.160257 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" event={"ID":"00b2e92d-156b-43e4-8f42-b4fa7e9e468f","Type":"ContainerDied","Data":"7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9"} Nov 25 12:44:48 crc kubenswrapper[4675]: I1125 12:44:48.180910 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" podStartSLOduration=2.728474623 podStartE2EDuration="3.180889138s" podCreationTimestamp="2025-11-25 12:44:45 +0000 UTC" firstStartedPulling="2025-11-25 12:44:46.436489053 +0000 UTC m=+1031.608081394" lastFinishedPulling="2025-11-25 12:44:46.888903568 +0000 UTC m=+1032.060495909" observedRunningTime="2025-11-25 12:44:48.177747997 +0000 UTC m=+1033.349340358" watchObservedRunningTime="2025-11-25 12:44:48.180889138 +0000 UTC m=+1033.352481499" Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.167842 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d2d15dcc-e29e-4b04-8de0-911cc8190e33","Type":"ContainerStarted","Data":"beee80a8d69c8deb515ef0c50d0e54603d566d168d30eb1f539eddb71b275ed3"} Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.168102 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d2d15dcc-e29e-4b04-8de0-911cc8190e33","Type":"ContainerStarted","Data":"734c56be9ea0bce8c4ad6021cabed698e3c2ef5325f542f4db58dac0eb42ebde"} Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.169498 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.169614 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" event={"ID":"00b2e92d-156b-43e4-8f42-b4fa7e9e468f","Type":"ContainerStarted","Data":"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43"} Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.188468 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.118242045 podStartE2EDuration="3.188446769s" podCreationTimestamp="2025-11-25 12:44:46 +0000 UTC" firstStartedPulling="2025-11-25 12:44:47.353559687 +0000 UTC m=+1032.525152028" lastFinishedPulling="2025-11-25 12:44:48.423764411 +0000 UTC m=+1033.595356752" observedRunningTime="2025-11-25 12:44:49.182991134 +0000 UTC m=+1034.354583475" watchObservedRunningTime="2025-11-25 12:44:49.188446769 +0000 UTC m=+1034.360039120" Nov 25 12:44:49 crc kubenswrapper[4675]: I1125 12:44:49.204872 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" podStartSLOduration=3.723287524 podStartE2EDuration="4.204858346s" podCreationTimestamp="2025-11-25 12:44:45 +0000 UTC" firstStartedPulling="2025-11-25 12:44:46.691154135 +0000 UTC m=+1031.862746476" lastFinishedPulling="2025-11-25 12:44:47.172724957 +0000 UTC m=+1032.344317298" observedRunningTime="2025-11-25 12:44:49.204088111 +0000 UTC m=+1034.375680472" watchObservedRunningTime="2025-11-25 12:44:49.204858346 +0000 UTC m=+1034.376450677" Nov 25 12:44:50 crc kubenswrapper[4675]: I1125 12:44:50.177148 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:50 crc 
kubenswrapper[4675]: I1125 12:44:50.732976 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 12:44:54 crc kubenswrapper[4675]: I1125 12:44:54.052848 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 12:44:54 crc kubenswrapper[4675]: I1125 12:44:54.053162 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 12:44:54 crc kubenswrapper[4675]: I1125 12:44:54.164094 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 12:44:54 crc kubenswrapper[4675]: I1125 12:44:54.251424 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.219767 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8d262fe-fd03-43eb-a9d8-fb43896cf021","Type":"ContainerStarted","Data":"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"} Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.220267 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.236645 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.842520715 podStartE2EDuration="48.236614097s" podCreationTimestamp="2025-11-25 12:44:07 +0000 UTC" firstStartedPulling="2025-11-25 12:44:08.826165271 +0000 UTC m=+993.997757612" lastFinishedPulling="2025-11-25 12:44:54.220258653 +0000 UTC m=+1039.391850994" observedRunningTime="2025-11-25 12:44:55.235371087 +0000 UTC m=+1040.406963428" watchObservedRunningTime="2025-11-25 12:44:55.236614097 +0000 UTC m=+1040.408206438" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.386456 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-z5vkb"] Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.387484 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.404467 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-z5vkb"] Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.476001 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjnmn\" (UniqueName: \"kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn\") pod \"keystone-db-create-z5vkb\" (UID: \"54a0d51f-0bf4-40d2-891e-9b71f9014203\") " pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.578205 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjnmn\" (UniqueName: \"kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn\") pod \"keystone-db-create-z5vkb\" (UID: \"54a0d51f-0bf4-40d2-891e-9b71f9014203\") " pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.598753 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjnmn\" (UniqueName: \"kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn\") pod \"keystone-db-create-z5vkb\" (UID: \"54a0d51f-0bf4-40d2-891e-9b71f9014203\") " pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.652842 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-j4wk6"] Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.654078 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.671283 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j4wk6"] Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.703593 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.833522 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4v85\" (UniqueName: \"kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85\") pod \"placement-db-create-j4wk6\" (UID: \"d2862e20-69d5-41fc-a821-bdffe2614102\") " pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.835017 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.848465 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.848498 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.937121 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4v85\" (UniqueName: \"kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85\") pod \"placement-db-create-j4wk6\" (UID: \"d2862e20-69d5-41fc-a821-bdffe2614102\") " pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.990646 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4v85\" (UniqueName: \"kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85\") pod \"placement-db-create-j4wk6\" (UID: \"d2862e20-69d5-41fc-a821-bdffe2614102\") " pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:55 crc kubenswrapper[4675]: I1125 12:44:55.995427 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.161955 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.213457 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.238078 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="dnsmasq-dns" containerID="cri-o://014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0" gracePeriod=10 Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.272985 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.339169 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-z5vkb"] Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.343785 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.804427 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.814868 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j4wk6"] Nov 25 12:44:56 crc kubenswrapper[4675]: W1125 12:44:56.824451 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2862e20_69d5_41fc_a821_bdffe2614102.slice/crio-d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45 WatchSource:0}: Error finding container d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45: Status 404 returned error can't find the container with id d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45 Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.961143 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb\") pod \"47599dee-e400-48de-a84e-781c9c2bc826\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.961251 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc\") pod \"47599dee-e400-48de-a84e-781c9c2bc826\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.961331 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpj79\" (UniqueName: \"kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79\") pod \"47599dee-e400-48de-a84e-781c9c2bc826\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.961354 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config\") pod \"47599dee-e400-48de-a84e-781c9c2bc826\" (UID: \"47599dee-e400-48de-a84e-781c9c2bc826\") " Nov 25 12:44:56 crc kubenswrapper[4675]: I1125 12:44:56.971005 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79" (OuterVolumeSpecName: "kube-api-access-qpj79") pod "47599dee-e400-48de-a84e-781c9c2bc826" (UID: "47599dee-e400-48de-a84e-781c9c2bc826"). InnerVolumeSpecName "kube-api-access-qpj79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.018702 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "47599dee-e400-48de-a84e-781c9c2bc826" (UID: "47599dee-e400-48de-a84e-781c9c2bc826"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.020072 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config" (OuterVolumeSpecName: "config") pod "47599dee-e400-48de-a84e-781c9c2bc826" (UID: "47599dee-e400-48de-a84e-781c9c2bc826"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.032596 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "47599dee-e400-48de-a84e-781c9c2bc826" (UID: "47599dee-e400-48de-a84e-781c9c2bc826"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.063059 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.063126 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpj79\" (UniqueName: \"kubernetes.io/projected/47599dee-e400-48de-a84e-781c9c2bc826-kube-api-access-qpj79\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.063141 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.063153 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47599dee-e400-48de-a84e-781c9c2bc826-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.244646 4675 generic.go:334] "Generic (PLEG): container finished" podID="47599dee-e400-48de-a84e-781c9c2bc826" containerID="014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0" exitCode=0 Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.244708 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" event={"ID":"47599dee-e400-48de-a84e-781c9c2bc826","Type":"ContainerDied","Data":"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.244731 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.245251 4675 scope.go:117] "RemoveContainer" containerID="014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.245232 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-jpxg6" event={"ID":"47599dee-e400-48de-a84e-781c9c2bc826","Type":"ContainerDied","Data":"ff55d4271b732daffb09252f975b0aef54b5bd41afcc5c09c89a0f13872d14d2"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.248920 4675 generic.go:334] "Generic (PLEG): container finished" podID="54a0d51f-0bf4-40d2-891e-9b71f9014203" containerID="d1161221510169542d73cd60466b25d57473e9204d86608e2caaa8f52232ecc5" exitCode=0 Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.249002 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-z5vkb" event={"ID":"54a0d51f-0bf4-40d2-891e-9b71f9014203","Type":"ContainerDied","Data":"d1161221510169542d73cd60466b25d57473e9204d86608e2caaa8f52232ecc5"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.249079 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-z5vkb" event={"ID":"54a0d51f-0bf4-40d2-891e-9b71f9014203","Type":"ContainerStarted","Data":"d81117ee6d12502d3a60eec612567f7f7c9ccb03b0bc6fef889eaa3322a4b874"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.251181 4675 generic.go:334] "Generic (PLEG): container finished" podID="d2862e20-69d5-41fc-a821-bdffe2614102" containerID="abcc8f0f57f25da6f6e957db44e8f545d449d978d0a9e4931e9fbaf2b0aff404" exitCode=0 Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.251496 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4wk6" event={"ID":"d2862e20-69d5-41fc-a821-bdffe2614102","Type":"ContainerDied","Data":"abcc8f0f57f25da6f6e957db44e8f545d449d978d0a9e4931e9fbaf2b0aff404"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.251654 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4wk6" event={"ID":"d2862e20-69d5-41fc-a821-bdffe2614102","Type":"ContainerStarted","Data":"d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45"} Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.280663 4675 scope.go:117] "RemoveContainer" containerID="5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.313728 4675 scope.go:117] "RemoveContainer" containerID="014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0" Nov 25 12:44:57 crc kubenswrapper[4675]: E1125 12:44:57.314317 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0\": container with ID starting with 014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0 not found: ID does not exist" containerID="014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.314367 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0"} err="failed to get container status \"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0\": rpc error: code = NotFound desc = could not find container 
\"014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0\": container with ID starting with 014e1eafb2c6863f4b1514683c7ae4544c614b9a6bbe55808f331959025e0ee0 not found: ID does not exist" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.314399 4675 scope.go:117] "RemoveContainer" containerID="5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674" Nov 25 12:44:57 crc kubenswrapper[4675]: E1125 12:44:57.314736 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674\": container with ID starting with 5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674 not found: ID does not exist" containerID="5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.314760 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674"} err="failed to get container status \"5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674\": rpc error: code = NotFound desc = could not find container \"5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674\": container with ID starting with 5e08746c997cf93a8cb6d6c72c06f08c6ee1564ea7d4cf2960e909d6e9794674 not found: ID does not exist" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.317886 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.323860 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-jpxg6"] Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.541781 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47599dee-e400-48de-a84e-781c9c2bc826" path="/var/lib/kubelet/pods/47599dee-e400-48de-a84e-781c9c2bc826/volumes" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.941811 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:44:57 crc kubenswrapper[4675]: E1125 12:44:57.942185 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="init" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.942206 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="init" Nov 25 12:44:57 crc kubenswrapper[4675]: E1125 12:44:57.942219 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="dnsmasq-dns" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.942227 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="dnsmasq-dns" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.942424 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="47599dee-e400-48de-a84e-781c9c2bc826" containerName="dnsmasq-dns" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.943259 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:57 crc kubenswrapper[4675]: I1125 12:44:57.972782 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.083359 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.083420 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4qvf\" (UniqueName: \"kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.083513 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.083543 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.083568 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.185622 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.185681 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.185708 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.185791 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.185824 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4qvf\" (UniqueName: \"kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.187129 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.187741 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.188330 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.188944 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.204809 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4qvf\" (UniqueName: \"kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf\") pod \"dnsmasq-dns-698758b865-k6ghf\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.258875 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.686955 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.708894 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.800597 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjnmn\" (UniqueName: \"kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn\") pod \"54a0d51f-0bf4-40d2-891e-9b71f9014203\" (UID: \"54a0d51f-0bf4-40d2-891e-9b71f9014203\") " Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.800933 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4v85\" (UniqueName: \"kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85\") pod \"d2862e20-69d5-41fc-a821-bdffe2614102\" (UID: \"d2862e20-69d5-41fc-a821-bdffe2614102\") " Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.808063 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn" (OuterVolumeSpecName: "kube-api-access-cjnmn") pod "54a0d51f-0bf4-40d2-891e-9b71f9014203" (UID: "54a0d51f-0bf4-40d2-891e-9b71f9014203"). InnerVolumeSpecName "kube-api-access-cjnmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.816791 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85" (OuterVolumeSpecName: "kube-api-access-f4v85") pod "d2862e20-69d5-41fc-a821-bdffe2614102" (UID: "d2862e20-69d5-41fc-a821-bdffe2614102"). InnerVolumeSpecName "kube-api-access-f4v85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.830409 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.906065 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjnmn\" (UniqueName: \"kubernetes.io/projected/54a0d51f-0bf4-40d2-891e-9b71f9014203-kube-api-access-cjnmn\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:58 crc kubenswrapper[4675]: I1125 12:44:58.906105 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4v85\" (UniqueName: \"kubernetes.io/projected/d2862e20-69d5-41fc-a821-bdffe2614102-kube-api-access-f4v85\") on node \"crc\" DevicePath \"\"" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.110109 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.110815 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2862e20-69d5-41fc-a821-bdffe2614102" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.110864 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2862e20-69d5-41fc-a821-bdffe2614102" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.110900 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a0d51f-0bf4-40d2-891e-9b71f9014203" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.110928 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a0d51f-0bf4-40d2-891e-9b71f9014203" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.111154 4675 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="54a0d51f-0bf4-40d2-891e-9b71f9014203" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.111217 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2862e20-69d5-41fc-a821-bdffe2614102" containerName="mariadb-database-create" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.125501 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.127262 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.128971 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.129218 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.129350 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.129486 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-rsjq2" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.275629 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-z5vkb" event={"ID":"54a0d51f-0bf4-40d2-891e-9b71f9014203","Type":"ContainerDied","Data":"d81117ee6d12502d3a60eec612567f7f7c9ccb03b0bc6fef889eaa3322a4b874"} Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.275672 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d81117ee6d12502d3a60eec612567f7f7c9ccb03b0bc6fef889eaa3322a4b874" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.275735 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-z5vkb" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.279233 4675 generic.go:334] "Generic (PLEG): container finished" podID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerID="b56454f9271d101890817656e721c10cb4bc123722e66e86adc8ee42ad668e2e" exitCode=0 Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.279458 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-k6ghf" event={"ID":"bce12a31-015a-4f94-9d41-46ad86692cc0","Type":"ContainerDied","Data":"b56454f9271d101890817656e721c10cb4bc123722e66e86adc8ee42ad668e2e"} Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.279554 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-k6ghf" event={"ID":"bce12a31-015a-4f94-9d41-46ad86692cc0","Type":"ContainerStarted","Data":"5ab70f57b41994cf6903e97913e49aff3b156c2099300da6a08b8b3b34cd8e4f"} Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.280957 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4wk6" event={"ID":"d2862e20-69d5-41fc-a821-bdffe2614102","Type":"ContainerDied","Data":"d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45"} Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.280975 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d55b25981207240a29718468cb2a60559a3b2904d99091b181960406e1d5fa45" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.280990 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-j4wk6" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.313551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-cache\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.313608 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.313637 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.313687 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlfw4\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-kube-api-access-jlfw4\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.313731 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-lock\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.415935 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-cache\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.415995 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.416023 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.416062 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlfw4\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-kube-api-access-jlfw4\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.416109 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: 
\"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-lock\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.416788 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.416831 4675 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.416887 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:44:59.916866362 +0000 UTC m=+1045.088458763 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.417118 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-cache\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.417311 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.417513 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/60249dd6-be73-49eb-861a-54bb77652335-lock\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.439765 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlfw4\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-kube-api-access-jlfw4\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.445150 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: I1125 12:44:59.924887 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.925058 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.925077 4675 
projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:44:59 crc kubenswrapper[4675]: E1125 12:44:59.925119 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:45:00.925104902 +0000 UTC m=+1046.096697243 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.130437 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q"] Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.131632 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.136070 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.136104 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.138332 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q"] Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.229314 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.229365 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk42h\" (UniqueName: \"kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.229414 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.290082 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-k6ghf" event={"ID":"bce12a31-015a-4f94-9d41-46ad86692cc0","Type":"ContainerStarted","Data":"9615d7abe8242e9b994843e36026f9b5b6f26f3d26426c8e9229972f1d55c0db"} Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.290262 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.324521 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-k6ghf" podStartSLOduration=3.324502483 podStartE2EDuration="3.324502483s" podCreationTimestamp="2025-11-25 12:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:00.323093837 +0000 UTC m=+1045.494686188" watchObservedRunningTime="2025-11-25 12:45:00.324502483 +0000 UTC m=+1045.496094834" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.330452 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.330711 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk42h\" (UniqueName: \"kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.330807 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.332646 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.338620 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.379442 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk42h\" (UniqueName: \"kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h\") pod \"collect-profiles-29401245-hmc2q\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.454221 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.947229 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:00 crc kubenswrapper[4675]: E1125 12:45:00.947880 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:45:00 crc kubenswrapper[4675]: E1125 12:45:00.947903 4675 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:45:00 crc kubenswrapper[4675]: E1125 12:45:00.947962 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:45:02.947944533 +0000 UTC m=+1048.119536874 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.980394 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-7rn6x"] Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.981617 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:00 crc kubenswrapper[4675]: I1125 12:45:00.987142 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-7rn6x"] Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.036258 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q"] Nov 25 12:45:01 crc kubenswrapper[4675]: W1125 12:45:01.049403 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb1cf8b2_fe5b_466d_859b_5f4cd3c04b95.slice/crio-60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1 WatchSource:0}: Error finding container 60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1: Status 404 returned error can't find the container with id 60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1 Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.150460 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbpwj\" (UniqueName: \"kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj\") pod \"glance-db-create-7rn6x\" (UID: \"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0\") " pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.251506 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbpwj\" (UniqueName: \"kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj\") pod \"glance-db-create-7rn6x\" (UID: \"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0\") " pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.277788 
4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbpwj\" (UniqueName: \"kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj\") pod \"glance-db-create-7rn6x\" (UID: \"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0\") " pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.300200 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" event={"ID":"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95","Type":"ContainerStarted","Data":"1ab80084b48bf90bbfacbe39e352a72391ad90eedadfd137a2f858b734c00c1c"} Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.300307 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" event={"ID":"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95","Type":"ContainerStarted","Data":"60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1"} Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.302068 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.332020 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" podStartSLOduration=1.331995692 podStartE2EDuration="1.331995692s" podCreationTimestamp="2025-11-25 12:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:01.322983923 +0000 UTC m=+1046.494576284" watchObservedRunningTime="2025-11-25 12:45:01.331995692 +0000 UTC m=+1046.503588043" Nov 25 12:45:01 crc kubenswrapper[4675]: I1125 12:45:01.816185 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-7rn6x"] Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.077464 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.307761 4675 generic.go:334] "Generic (PLEG): container finished" podID="f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" containerID="da138338ad4f450445c9e8be5b6fa14cf8d93e93d05282c1254724b3dd7ce994" exitCode=0 Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.307874 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7rn6x" event={"ID":"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0","Type":"ContainerDied","Data":"da138338ad4f450445c9e8be5b6fa14cf8d93e93d05282c1254724b3dd7ce994"} Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.308087 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7rn6x" event={"ID":"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0","Type":"ContainerStarted","Data":"ff025d56cf8aa199f431b3283d0697dbc196f50ff81371e51f87ec763cc1ba06"} Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.309902 4675 generic.go:334] "Generic (PLEG): container finished" podID="eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" containerID="1ab80084b48bf90bbfacbe39e352a72391ad90eedadfd137a2f858b734c00c1c" exitCode=0 Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.309946 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" 
event={"ID":"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95","Type":"ContainerDied","Data":"1ab80084b48bf90bbfacbe39e352a72391ad90eedadfd137a2f858b734c00c1c"} Nov 25 12:45:02 crc kubenswrapper[4675]: I1125 12:45:02.981792 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:02 crc kubenswrapper[4675]: E1125 12:45:02.982036 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:45:02 crc kubenswrapper[4675]: E1125 12:45:02.982074 4675 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:45:02 crc kubenswrapper[4675]: E1125 12:45:02.982137 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:45:06.982115338 +0000 UTC m=+1052.153707679 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.024399 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-g7rkg"] Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.025459 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.027313 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.027438 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.027746 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.038391 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-g7rkg"] Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.185428 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.185992 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.186175 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99sq8\" (UniqueName: \"kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.186275 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.186388 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.186503 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.186613 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 
12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287691 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287755 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287847 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99sq8\" (UniqueName: \"kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287872 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287906 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287940 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.287970 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.288452 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.289108 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.289114 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.294332 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.294742 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.300304 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.311990 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99sq8\" (UniqueName: \"kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8\") pod \"swift-ring-rebalance-g7rkg\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.345260 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.756940 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.767225 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.855399 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-g7rkg"] Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.900530 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume\") pod \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.900576 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume\") pod \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.900599 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk42h\" (UniqueName: \"kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h\") pod \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\" (UID: \"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95\") " Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.900635 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbpwj\" (UniqueName: \"kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj\") pod \"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0\" (UID: \"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0\") " Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.901099 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume" (OuterVolumeSpecName: "config-volume") pod "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" (UID: "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.906718 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj" (OuterVolumeSpecName: "kube-api-access-xbpwj") pod "f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" (UID: "f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0"). InnerVolumeSpecName "kube-api-access-xbpwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.907777 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" (UID: "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:03 crc kubenswrapper[4675]: I1125 12:45:03.911557 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h" (OuterVolumeSpecName: "kube-api-access-hk42h") pod "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" (UID: "eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95"). InnerVolumeSpecName "kube-api-access-hk42h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.002533 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk42h\" (UniqueName: \"kubernetes.io/projected/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-kube-api-access-hk42h\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.002591 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbpwj\" (UniqueName: \"kubernetes.io/projected/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0-kube-api-access-xbpwj\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.002606 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.002618 4675 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.328655 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-g7rkg" event={"ID":"959f7b20-344e-4759-8142-19a41f250c72","Type":"ContainerStarted","Data":"8c4fa6777e8cbd21704c8cad29ce5d4d5d4feb36d7f47fcd8117a97bbb1b0928"} Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.329977 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" event={"ID":"eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95","Type":"ContainerDied","Data":"60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1"} Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.330002 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60da4a35fd777554d804410705fb3dcf6d2baffd3ca0052ccf0bd86d57f39ea1" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.330065 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.333165 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7rn6x" event={"ID":"f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0","Type":"ContainerDied","Data":"ff025d56cf8aa199f431b3283d0697dbc196f50ff81371e51f87ec763cc1ba06"} Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.333196 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff025d56cf8aa199f431b3283d0697dbc196f50ff81371e51f87ec763cc1ba06" Nov 25 12:45:04 crc kubenswrapper[4675]: I1125 12:45:04.333250 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-7rn6x" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.452679 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0d38-account-create-rbkxp"] Nov 25 12:45:05 crc kubenswrapper[4675]: E1125 12:45:05.453319 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" containerName="mariadb-database-create" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.453331 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" containerName="mariadb-database-create" Nov 25 12:45:05 crc kubenswrapper[4675]: E1125 12:45:05.453347 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" containerName="collect-profiles" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.453352 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" containerName="collect-profiles" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.453504 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" containerName="mariadb-database-create" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.453523 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" containerName="collect-profiles" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.454173 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.458864 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.469149 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0d38-account-create-rbkxp"] Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.640078 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94gzb\" (UniqueName: \"kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb\") pod \"keystone-0d38-account-create-rbkxp\" (UID: \"43901de0-d24f-4bee-aeaf-8c7c610b5312\") " pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.742019 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-a7cb-account-create-jlj4x"] Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.743512 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.743687 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94gzb\" (UniqueName: \"kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb\") pod \"keystone-0d38-account-create-rbkxp\" (UID: \"43901de0-d24f-4bee-aeaf-8c7c610b5312\") " pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.750769 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.753748 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-a7cb-account-create-jlj4x"] Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.784963 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94gzb\" (UniqueName: \"kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb\") pod \"keystone-0d38-account-create-rbkxp\" (UID: \"43901de0-d24f-4bee-aeaf-8c7c610b5312\") " pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.846375 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz8f6\" (UniqueName: \"kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6\") pod \"placement-a7cb-account-create-jlj4x\" (UID: \"87535d76-93d8-4f78-bb2e-9c9a1ac266d3\") " pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.948271 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz8f6\" (UniqueName: \"kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6\") pod \"placement-a7cb-account-create-jlj4x\" (UID: \"87535d76-93d8-4f78-bb2e-9c9a1ac266d3\") " pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:05 crc kubenswrapper[4675]: I1125 12:45:05.981750 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz8f6\" (UniqueName: \"kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6\") pod \"placement-a7cb-account-create-jlj4x\" (UID: \"87535d76-93d8-4f78-bb2e-9c9a1ac266d3\") " pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:06 crc kubenswrapper[4675]: I1125 12:45:06.060250 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:06 crc kubenswrapper[4675]: I1125 12:45:06.084039 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:07 crc kubenswrapper[4675]: I1125 12:45:07.066950 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:07 crc kubenswrapper[4675]: E1125 12:45:07.067147 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:45:07 crc kubenswrapper[4675]: E1125 12:45:07.067525 4675 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:45:07 crc kubenswrapper[4675]: E1125 12:45:07.067584 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:45:15.067566297 +0000 UTC m=+1060.239158638 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:45:07 crc kubenswrapper[4675]: I1125 12:45:07.867602 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0d38-account-create-rbkxp"] Nov 25 12:45:07 crc kubenswrapper[4675]: W1125 12:45:07.875701 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43901de0_d24f_4bee_aeaf_8c7c610b5312.slice/crio-3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e WatchSource:0}: Error finding container 3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e: Status 404 returned error can't find the container with id 3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e Nov 25 12:45:07 crc kubenswrapper[4675]: I1125 12:45:07.916367 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 12:45:08 crc kubenswrapper[4675]: W1125 12:45:08.012888 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87535d76_93d8_4f78_bb2e_9c9a1ac266d3.slice/crio-e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60 WatchSource:0}: Error finding container e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60: Status 404 returned error can't find the container with id e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60 Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.013255 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-a7cb-account-create-jlj4x"] Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.260039 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.310156 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.310455 4675 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="dnsmasq-dns" containerID="cri-o://bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43" gracePeriod=10 Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.367230 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-g7rkg" event={"ID":"959f7b20-344e-4759-8142-19a41f250c72","Type":"ContainerStarted","Data":"b4d5a7cb387d98e0656a19d6a3ef33ce589ec606e9a6910c5bf00f8de5f087e0"} Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.372015 4675 generic.go:334] "Generic (PLEG): container finished" podID="43901de0-d24f-4bee-aeaf-8c7c610b5312" containerID="734f96fae17492b0510fbc953a6d266fbb25ca5f6ddb6394544e6f7701b3c7fb" exitCode=0 Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.372102 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0d38-account-create-rbkxp" event={"ID":"43901de0-d24f-4bee-aeaf-8c7c610b5312","Type":"ContainerDied","Data":"734f96fae17492b0510fbc953a6d266fbb25ca5f6ddb6394544e6f7701b3c7fb"} Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.372129 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0d38-account-create-rbkxp" event={"ID":"43901de0-d24f-4bee-aeaf-8c7c610b5312","Type":"ContainerStarted","Data":"3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e"} Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.374543 4675 generic.go:334] "Generic (PLEG): container finished" podID="87535d76-93d8-4f78-bb2e-9c9a1ac266d3" containerID="e80326bdb7a154e1d2f55fcec5ebba9e87f9132f0604c27670106006c62cc497" exitCode=0 Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.374591 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a7cb-account-create-jlj4x" event={"ID":"87535d76-93d8-4f78-bb2e-9c9a1ac266d3","Type":"ContainerDied","Data":"e80326bdb7a154e1d2f55fcec5ebba9e87f9132f0604c27670106006c62cc497"} Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.374618 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a7cb-account-create-jlj4x" event={"ID":"87535d76-93d8-4f78-bb2e-9c9a1ac266d3","Type":"ContainerStarted","Data":"e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60"} Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.432756 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-g7rkg" podStartSLOduration=1.8599539109999998 podStartE2EDuration="5.432732598s" podCreationTimestamp="2025-11-25 12:45:03 +0000 UTC" firstStartedPulling="2025-11-25 12:45:03.872549677 +0000 UTC m=+1049.044142018" lastFinishedPulling="2025-11-25 12:45:07.445328354 +0000 UTC m=+1052.616920705" observedRunningTime="2025-11-25 12:45:08.397888158 +0000 UTC m=+1053.569480519" watchObservedRunningTime="2025-11-25 12:45:08.432732598 +0000 UTC m=+1053.604324939" Nov 25 12:45:08 crc kubenswrapper[4675]: I1125 12:45:08.826184 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.007029 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgsnm\" (UniqueName: \"kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm\") pod \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.007085 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb\") pod \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.007122 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc\") pod \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.007264 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config\") pod \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.007338 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb\") pod \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\" (UID: \"00b2e92d-156b-43e4-8f42-b4fa7e9e468f\") " Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.025222 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm" (OuterVolumeSpecName: "kube-api-access-bgsnm") pod "00b2e92d-156b-43e4-8f42-b4fa7e9e468f" (UID: "00b2e92d-156b-43e4-8f42-b4fa7e9e468f"). InnerVolumeSpecName "kube-api-access-bgsnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.076409 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config" (OuterVolumeSpecName: "config") pod "00b2e92d-156b-43e4-8f42-b4fa7e9e468f" (UID: "00b2e92d-156b-43e4-8f42-b4fa7e9e468f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.088959 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "00b2e92d-156b-43e4-8f42-b4fa7e9e468f" (UID: "00b2e92d-156b-43e4-8f42-b4fa7e9e468f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.089353 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "00b2e92d-156b-43e4-8f42-b4fa7e9e468f" (UID: "00b2e92d-156b-43e4-8f42-b4fa7e9e468f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.093222 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "00b2e92d-156b-43e4-8f42-b4fa7e9e468f" (UID: "00b2e92d-156b-43e4-8f42-b4fa7e9e468f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.109378 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.109608 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgsnm\" (UniqueName: \"kubernetes.io/projected/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-kube-api-access-bgsnm\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.109695 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.109796 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.109961 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b2e92d-156b-43e4-8f42-b4fa7e9e468f-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.389158 4675 generic.go:334] "Generic (PLEG): container finished" podID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerID="bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43" exitCode=0 Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.389778 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.390506 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" event={"ID":"00b2e92d-156b-43e4-8f42-b4fa7e9e468f","Type":"ContainerDied","Data":"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43"} Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.390576 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-mbn5b" event={"ID":"00b2e92d-156b-43e4-8f42-b4fa7e9e468f","Type":"ContainerDied","Data":"9b321a0fd00da867b49d28122233dfa868ea5ecafd9a1bc1114d437b1a5e1c48"} Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.390600 4675 scope.go:117] "RemoveContainer" containerID="bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.456993 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.459354 4675 scope.go:117] "RemoveContainer" containerID="7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.466171 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-mbn5b"] Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.486005 4675 scope.go:117] "RemoveContainer" containerID="bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43" Nov 25 12:45:09 crc kubenswrapper[4675]: E1125 12:45:09.486347 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43\": container with ID starting with bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43 not found: ID does not exist" containerID="bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.486372 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43"} err="failed to get container status \"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43\": rpc error: code = NotFound desc = could not find container \"bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43\": container with ID starting with bef18d6c4ecbc7fa58e0c74ca5f7c6a7cbda967b2f170478cf29ffb4b8c95f43 not found: ID does not exist" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.486391 4675 scope.go:117] "RemoveContainer" containerID="7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9" Nov 25 12:45:09 crc kubenswrapper[4675]: E1125 12:45:09.486596 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9\": container with ID starting with 7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9 not found: ID does not exist" containerID="7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.486611 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9"} err="failed to get container status 
\"7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9\": rpc error: code = NotFound desc = could not find container \"7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9\": container with ID starting with 7f3bb7d707c8ee1dbffe60455a3478fa06fc41f23689dcf78080b6169c1658b9 not found: ID does not exist" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.547670 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" path="/var/lib/kubelet/pods/00b2e92d-156b-43e4-8f42-b4fa7e9e468f/volumes" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.841296 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:09 crc kubenswrapper[4675]: I1125 12:45:09.846380 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.025706 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94gzb\" (UniqueName: \"kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb\") pod \"43901de0-d24f-4bee-aeaf-8c7c610b5312\" (UID: \"43901de0-d24f-4bee-aeaf-8c7c610b5312\") " Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.025768 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz8f6\" (UniqueName: \"kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6\") pod \"87535d76-93d8-4f78-bb2e-9c9a1ac266d3\" (UID: \"87535d76-93d8-4f78-bb2e-9c9a1ac266d3\") " Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.034054 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6" (OuterVolumeSpecName: "kube-api-access-vz8f6") pod "87535d76-93d8-4f78-bb2e-9c9a1ac266d3" (UID: "87535d76-93d8-4f78-bb2e-9c9a1ac266d3"). InnerVolumeSpecName "kube-api-access-vz8f6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.034775 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb" (OuterVolumeSpecName: "kube-api-access-94gzb") pod "43901de0-d24f-4bee-aeaf-8c7c610b5312" (UID: "43901de0-d24f-4bee-aeaf-8c7c610b5312"). InnerVolumeSpecName "kube-api-access-94gzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.128446 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94gzb\" (UniqueName: \"kubernetes.io/projected/43901de0-d24f-4bee-aeaf-8c7c610b5312-kube-api-access-94gzb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.128484 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz8f6\" (UniqueName: \"kubernetes.io/projected/87535d76-93d8-4f78-bb2e-9c9a1ac266d3-kube-api-access-vz8f6\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.401199 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0d38-account-create-rbkxp" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.401204 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0d38-account-create-rbkxp" event={"ID":"43901de0-d24f-4bee-aeaf-8c7c610b5312","Type":"ContainerDied","Data":"3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e"} Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.401250 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f659b92365f023385b106462b93747c7aa9eae68b3747bc473815cec941e66e" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.404632 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a7cb-account-create-jlj4x" event={"ID":"87535d76-93d8-4f78-bb2e-9c9a1ac266d3","Type":"ContainerDied","Data":"e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60"} Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.404700 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e04a8bf28a49d08de9257c19f9cfafd7e8fe8a74245e399fac8672ac1e652c60" Nov 25 12:45:10 crc kubenswrapper[4675]: I1125 12:45:10.404667 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a7cb-account-create-jlj4x" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060255 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-4ef6-account-create-lvnrz"] Nov 25 12:45:11 crc kubenswrapper[4675]: E1125 12:45:11.060679 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="init" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060702 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="init" Nov 25 12:45:11 crc kubenswrapper[4675]: E1125 12:45:11.060718 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87535d76-93d8-4f78-bb2e-9c9a1ac266d3" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060725 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="87535d76-93d8-4f78-bb2e-9c9a1ac266d3" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: E1125 12:45:11.060746 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43901de0-d24f-4bee-aeaf-8c7c610b5312" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060754 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="43901de0-d24f-4bee-aeaf-8c7c610b5312" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: E1125 12:45:11.060767 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="dnsmasq-dns" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060774 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="dnsmasq-dns" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.060998 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="87535d76-93d8-4f78-bb2e-9c9a1ac266d3" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.061018 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b2e92d-156b-43e4-8f42-b4fa7e9e468f" containerName="dnsmasq-dns" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 
12:45:11.061038 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="43901de0-d24f-4bee-aeaf-8c7c610b5312" containerName="mariadb-account-create" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.061667 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.064537 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.069111 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4ef6-account-create-lvnrz"] Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.158087 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxknq\" (UniqueName: \"kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq\") pod \"glance-4ef6-account-create-lvnrz\" (UID: \"37d54c36-f165-4ca9-b1f6-95771e796399\") " pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.263404 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxknq\" (UniqueName: \"kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq\") pod \"glance-4ef6-account-create-lvnrz\" (UID: \"37d54c36-f165-4ca9-b1f6-95771e796399\") " pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.282485 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxknq\" (UniqueName: \"kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq\") pod \"glance-4ef6-account-create-lvnrz\" (UID: \"37d54c36-f165-4ca9-b1f6-95771e796399\") " pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.304663 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-vgv4n" podUID="bd0994da-34e6-4f4c-b8a5-cae4c7923df7" containerName="ovn-controller" probeResult="failure" output=< Nov 25 12:45:11 crc kubenswrapper[4675]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 12:45:11 crc kubenswrapper[4675]: > Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.356410 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.358688 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zx9pg" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.392598 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.618435 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vgv4n-config-xhnrr"] Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.629200 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.641722 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.655462 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n-config-xhnrr"] Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774364 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774466 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774503 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774530 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774558 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6gzg\" (UniqueName: \"kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.774642 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.871792 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4ef6-account-create-lvnrz"] Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.877758 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.877852 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.877930 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.877967 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.877995 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.878027 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6gzg\" (UniqueName: \"kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.878416 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.878465 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.878663 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.879188 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.880675 4675 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.908887 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6gzg\" (UniqueName: \"kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg\") pod \"ovn-controller-vgv4n-config-xhnrr\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:11 crc kubenswrapper[4675]: I1125 12:45:11.967802 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:12 crc kubenswrapper[4675]: I1125 12:45:12.430591 4675 generic.go:334] "Generic (PLEG): container finished" podID="37d54c36-f165-4ca9-b1f6-95771e796399" containerID="ce31f8ffd7789a4e06fba86090bc1ffd141dcc4554b9d256f8ec584572992a8f" exitCode=0 Nov 25 12:45:12 crc kubenswrapper[4675]: I1125 12:45:12.430927 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4ef6-account-create-lvnrz" event={"ID":"37d54c36-f165-4ca9-b1f6-95771e796399","Type":"ContainerDied","Data":"ce31f8ffd7789a4e06fba86090bc1ffd141dcc4554b9d256f8ec584572992a8f"} Nov 25 12:45:12 crc kubenswrapper[4675]: I1125 12:45:12.430953 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4ef6-account-create-lvnrz" event={"ID":"37d54c36-f165-4ca9-b1f6-95771e796399","Type":"ContainerStarted","Data":"353b0cdd793d8ad0bd6ca5a6af26f98afb39aca7217c10589bdebb0dd3efdbbc"} Nov 25 12:45:12 crc kubenswrapper[4675]: I1125 12:45:12.473043 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n-config-xhnrr"] Nov 25 12:45:12 crc kubenswrapper[4675]: W1125 12:45:12.476325 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5744e9bb_e335_47f3_8f7b_b1977201def9.slice/crio-7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5 WatchSource:0}: Error finding container 7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5: Status 404 returned error can't find the container with id 7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5 Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.446489 4675 generic.go:334] "Generic (PLEG): container finished" podID="5744e9bb-e335-47f3-8f7b-b1977201def9" containerID="8fe10d0557dc21343c75ea1187cf8ffc2c42c40efc72eae7f5243b2511de0468" exitCode=0 Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.447223 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-xhnrr" event={"ID":"5744e9bb-e335-47f3-8f7b-b1977201def9","Type":"ContainerDied","Data":"8fe10d0557dc21343c75ea1187cf8ffc2c42c40efc72eae7f5243b2511de0468"} Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.447249 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-xhnrr" event={"ID":"5744e9bb-e335-47f3-8f7b-b1977201def9","Type":"ContainerStarted","Data":"7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5"} Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.748683 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.910777 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxknq\" (UniqueName: \"kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq\") pod \"37d54c36-f165-4ca9-b1f6-95771e796399\" (UID: \"37d54c36-f165-4ca9-b1f6-95771e796399\") " Nov 25 12:45:13 crc kubenswrapper[4675]: I1125 12:45:13.920245 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq" (OuterVolumeSpecName: "kube-api-access-xxknq") pod "37d54c36-f165-4ca9-b1f6-95771e796399" (UID: "37d54c36-f165-4ca9-b1f6-95771e796399"). InnerVolumeSpecName "kube-api-access-xxknq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.013240 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxknq\" (UniqueName: \"kubernetes.io/projected/37d54c36-f165-4ca9-b1f6-95771e796399-kube-api-access-xxknq\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.456913 4675 generic.go:334] "Generic (PLEG): container finished" podID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerID="440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a" exitCode=0 Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.457012 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerDied","Data":"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a"} Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.460701 4675 generic.go:334] "Generic (PLEG): container finished" podID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerID="9172f56922c897a590a4b50e195c278eae03e0cde37d71e1e27ae75d11b847eb" exitCode=0 Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.460793 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerDied","Data":"9172f56922c897a590a4b50e195c278eae03e0cde37d71e1e27ae75d11b847eb"} Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.467949 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4ef6-account-create-lvnrz" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.468279 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4ef6-account-create-lvnrz" event={"ID":"37d54c36-f165-4ca9-b1f6-95771e796399","Type":"ContainerDied","Data":"353b0cdd793d8ad0bd6ca5a6af26f98afb39aca7217c10589bdebb0dd3efdbbc"} Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.468319 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="353b0cdd793d8ad0bd6ca5a6af26f98afb39aca7217c10589bdebb0dd3efdbbc" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.847469 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.926297 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.926596 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.926718 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.926419 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.926888 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run" (OuterVolumeSpecName: "var-run") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.927081 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.927197 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.927344 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.927540 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6gzg\" (UniqueName: \"kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg\") pod \"5744e9bb-e335-47f3-8f7b-b1977201def9\" (UID: \"5744e9bb-e335-47f3-8f7b-b1977201def9\") " Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.927874 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts" (OuterVolumeSpecName: "scripts") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.928279 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.928375 4675 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.928488 4675 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.928576 4675 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5744e9bb-e335-47f3-8f7b-b1977201def9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.928402 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:14 crc kubenswrapper[4675]: I1125 12:45:14.930887 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg" (OuterVolumeSpecName: "kube-api-access-t6gzg") pod "5744e9bb-e335-47f3-8f7b-b1977201def9" (UID: "5744e9bb-e335-47f3-8f7b-b1977201def9"). InnerVolumeSpecName "kube-api-access-t6gzg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.029710 4675 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5744e9bb-e335-47f3-8f7b-b1977201def9-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.029752 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6gzg\" (UniqueName: \"kubernetes.io/projected/5744e9bb-e335-47f3-8f7b-b1977201def9-kube-api-access-t6gzg\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.130661 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:15 crc kubenswrapper[4675]: E1125 12:45:15.130831 4675 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 12:45:15 crc kubenswrapper[4675]: E1125 12:45:15.130844 4675 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 12:45:15 crc kubenswrapper[4675]: E1125 12:45:15.130885 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift podName:60249dd6-be73-49eb-861a-54bb77652335 nodeName:}" failed. No retries permitted until 2025-11-25 12:45:31.130873089 +0000 UTC m=+1076.302465430 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift") pod "swift-storage-0" (UID: "60249dd6-be73-49eb-861a-54bb77652335") : configmap "swift-ring-files" not found Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.477777 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerStarted","Data":"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a"} Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.478112 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.481111 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerStarted","Data":"9ec7f903a2f764b8fd62426ad761cb8d54f5cf6f3d514ea6503adcdc93798c10"} Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.481399 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.484673 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-xhnrr" event={"ID":"5744e9bb-e335-47f3-8f7b-b1977201def9","Type":"ContainerDied","Data":"7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5"} Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.484701 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7346a0acc101975d3a752b555153a423984f43494ceead3d212bc3eb23affec5" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.484753 4675 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-xhnrr" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.514984 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.434036747 podStartE2EDuration="1m14.51496983s" podCreationTimestamp="2025-11-25 12:44:01 +0000 UTC" firstStartedPulling="2025-11-25 12:44:03.553419285 +0000 UTC m=+988.725011626" lastFinishedPulling="2025-11-25 12:44:40.634352368 +0000 UTC m=+1025.805944709" observedRunningTime="2025-11-25 12:45:15.50998805 +0000 UTC m=+1060.681580401" watchObservedRunningTime="2025-11-25 12:45:15.51496983 +0000 UTC m=+1060.686562171" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.560042 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.026508774 podStartE2EDuration="1m15.560023917s" podCreationTimestamp="2025-11-25 12:44:00 +0000 UTC" firstStartedPulling="2025-11-25 12:44:02.99266279 +0000 UTC m=+988.164255131" lastFinishedPulling="2025-11-25 12:44:40.526177933 +0000 UTC m=+1025.697770274" observedRunningTime="2025-11-25 12:45:15.544188488 +0000 UTC m=+1060.715780839" watchObservedRunningTime="2025-11-25 12:45:15.560023917 +0000 UTC m=+1060.731616258" Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.957987 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vgv4n-config-xhnrr"] Nov 25 12:45:15 crc kubenswrapper[4675]: I1125 12:45:15.963952 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vgv4n-config-xhnrr"] Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.080943 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vgv4n-config-6bwqv"] Nov 25 12:45:16 crc kubenswrapper[4675]: E1125 12:45:16.081248 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d54c36-f165-4ca9-b1f6-95771e796399" containerName="mariadb-account-create" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.081261 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d54c36-f165-4ca9-b1f6-95771e796399" containerName="mariadb-account-create" Nov 25 12:45:16 crc kubenswrapper[4675]: E1125 12:45:16.081291 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5744e9bb-e335-47f3-8f7b-b1977201def9" containerName="ovn-config" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.081298 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5744e9bb-e335-47f3-8f7b-b1977201def9" containerName="ovn-config" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.081462 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d54c36-f165-4ca9-b1f6-95771e796399" containerName="mariadb-account-create" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.081497 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="5744e9bb-e335-47f3-8f7b-b1977201def9" containerName="ovn-config" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.082033 4675 util.go:30] "No sandbox for pod can be found. 
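
The two startup numbers reported above for rabbitmq-cell1-server-0 fit together arithmetically: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). This reading reproduces the logged values exactly, but it is inferred from the log, not quoted from kubelet documentation:

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-25T12:44:01Z")
	firstPull := mustParse("2025-11-25T12:44:03.553419285Z")
	lastPull := mustParse("2025-11-25T12:44:40.634352368Z")
	observed := mustParse("2025-11-25T12:45:15.51496983Z")

	e2e := observed.Sub(created)         // 1m14.51496983s, as logged
	slo := e2e - lastPull.Sub(firstPull) // 37.434036747s, as logged
	fmt.Println("podStartE2EDuration:", e2e)
	fmt.Println("podStartSLOduration:", slo)
}
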
Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.085253 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.135873 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n-config-6bwqv"] Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.144845 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.144884 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.144935 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.144996 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.145024 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.145061 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4xpd\" (UniqueName: \"kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246711 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246807 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn\") 
pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246846 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246885 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4xpd\" (UniqueName: \"kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246930 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.246948 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.247149 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.247234 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.247272 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.248181 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.248916 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts\") pod \"ovn-controller-vgv4n-config-6bwqv\" 
(UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.268728 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4xpd\" (UniqueName: \"kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd\") pod \"ovn-controller-vgv4n-config-6bwqv\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.315642 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-nqx9k"] Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.316902 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.319432 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.319597 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-zb75l" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.339481 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nqx9k"] Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.395920 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.426564 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-vgv4n" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.450640 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.450706 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mczn8\" (UniqueName: \"kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.450751 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.450785 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.519334 4675 generic.go:334] "Generic (PLEG): container finished" podID="959f7b20-344e-4759-8142-19a41f250c72" containerID="b4d5a7cb387d98e0656a19d6a3ef33ce589ec606e9a6910c5bf00f8de5f087e0" exitCode=0 Nov 25 12:45:16 crc 
kubenswrapper[4675]: I1125 12:45:16.520084 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-g7rkg" event={"ID":"959f7b20-344e-4759-8142-19a41f250c72","Type":"ContainerDied","Data":"b4d5a7cb387d98e0656a19d6a3ef33ce589ec606e9a6910c5bf00f8de5f087e0"} Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.554070 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.554158 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mczn8\" (UniqueName: \"kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.554211 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.554247 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.557981 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.558597 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.559285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.579435 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mczn8\" (UniqueName: \"kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8\") pod \"glance-db-sync-nqx9k\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:16 crc kubenswrapper[4675]: I1125 12:45:16.649357 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.052149 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vgv4n-config-6bwqv"] Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.266464 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nqx9k"] Nov 25 12:45:17 crc kubenswrapper[4675]: W1125 12:45:17.277843 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5922e4e_e904_49f3_8661_d4cfb1dbebd2.slice/crio-30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26 WatchSource:0}: Error finding container 30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26: Status 404 returned error can't find the container with id 30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26 Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.528146 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-6bwqv" event={"ID":"30800a7c-e1e7-4a54-80cc-f563104b4a71","Type":"ContainerStarted","Data":"f423bf9cd908ab5cc69f6880c12a5abd2d642ec05fa5c43c53c12aeee09edcdd"} Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.528465 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-6bwqv" event={"ID":"30800a7c-e1e7-4a54-80cc-f563104b4a71","Type":"ContainerStarted","Data":"3a980f919340315cb1d998c04d28adfb565e6d3e41dfc6a9cbfffeb990a903b2"} Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.530705 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nqx9k" event={"ID":"a5922e4e-e904-49f3-8661-d4cfb1dbebd2","Type":"ContainerStarted","Data":"30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26"} Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.541904 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5744e9bb-e335-47f3-8f7b-b1977201def9" path="/var/lib/kubelet/pods/5744e9bb-e335-47f3-8f7b-b1977201def9/volumes" Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.560506 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-vgv4n-config-6bwqv" podStartSLOduration=1.560481859 podStartE2EDuration="1.560481859s" podCreationTimestamp="2025-11-25 12:45:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:17.554283459 +0000 UTC m=+1062.725875800" watchObservedRunningTime="2025-11-25 12:45:17.560481859 +0000 UTC m=+1062.732074210" Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.904599 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.987965 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988414 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988467 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988500 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988558 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99sq8\" (UniqueName: \"kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988609 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.988670 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf\") pod \"959f7b20-344e-4759-8142-19a41f250c72\" (UID: \"959f7b20-344e-4759-8142-19a41f250c72\") " Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.989449 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:17 crc kubenswrapper[4675]: I1125 12:45:17.989492 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.007155 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8" (OuterVolumeSpecName: "kube-api-access-99sq8") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "kube-api-access-99sq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.013069 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.032974 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.033061 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.042515 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts" (OuterVolumeSpecName: "scripts") pod "959f7b20-344e-4759-8142-19a41f250c72" (UID: "959f7b20-344e-4759-8142-19a41f250c72"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090514 4675 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090550 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090558 4675 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090569 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959f7b20-344e-4759-8142-19a41f250c72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090579 4675 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/959f7b20-344e-4759-8142-19a41f250c72-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090587 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99sq8\" (UniqueName: \"kubernetes.io/projected/959f7b20-344e-4759-8142-19a41f250c72-kube-api-access-99sq8\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.090596 4675 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/959f7b20-344e-4759-8142-19a41f250c72-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.541906 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-g7rkg" event={"ID":"959f7b20-344e-4759-8142-19a41f250c72","Type":"ContainerDied","Data":"8c4fa6777e8cbd21704c8cad29ce5d4d5d4feb36d7f47fcd8117a97bbb1b0928"} Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.541941 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c4fa6777e8cbd21704c8cad29ce5d4d5d4feb36d7f47fcd8117a97bbb1b0928" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.542001 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-g7rkg" Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.545027 4675 generic.go:334] "Generic (PLEG): container finished" podID="30800a7c-e1e7-4a54-80cc-f563104b4a71" containerID="f423bf9cd908ab5cc69f6880c12a5abd2d642ec05fa5c43c53c12aeee09edcdd" exitCode=0 Nov 25 12:45:18 crc kubenswrapper[4675]: I1125 12:45:18.545063 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vgv4n-config-6bwqv" event={"ID":"30800a7c-e1e7-4a54-80cc-f563104b4a71","Type":"ContainerDied","Data":"f423bf9cd908ab5cc69f6880c12a5abd2d642ec05fa5c43c53c12aeee09edcdd"} Nov 25 12:45:19 crc kubenswrapper[4675]: I1125 12:45:19.932983 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.032453 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xpd\" (UniqueName: \"kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.032853 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.032901 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.032938 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033038 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run" (OuterVolumeSpecName: "var-run") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033054 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033083 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033104 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033214 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts\") pod \"30800a7c-e1e7-4a54-80cc-f563104b4a71\" (UID: \"30800a7c-e1e7-4a54-80cc-f563104b4a71\") " Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033769 4675 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033787 4675 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.033796 4675 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30800a7c-e1e7-4a54-80cc-f563104b4a71-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.034084 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.034479 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts" (OuterVolumeSpecName: "scripts") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.056468 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd" (OuterVolumeSpecName: "kube-api-access-w4xpd") pod "30800a7c-e1e7-4a54-80cc-f563104b4a71" (UID: "30800a7c-e1e7-4a54-80cc-f563104b4a71"). InnerVolumeSpecName "kube-api-access-w4xpd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.128617 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vgv4n-config-6bwqv"] Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.134914 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.134953 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xpd\" (UniqueName: \"kubernetes.io/projected/30800a7c-e1e7-4a54-80cc-f563104b4a71-kube-api-access-w4xpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.134966 4675 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/30800a7c-e1e7-4a54-80cc-f563104b4a71-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.139655 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vgv4n-config-6bwqv"] Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.564879 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a980f919340315cb1d998c04d28adfb565e6d3e41dfc6a9cbfffeb990a903b2" Nov 25 12:45:20 crc kubenswrapper[4675]: I1125 12:45:20.564944 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vgv4n-config-6bwqv" Nov 25 12:45:21 crc kubenswrapper[4675]: I1125 12:45:21.545776 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30800a7c-e1e7-4a54-80cc-f563104b4a71" path="/var/lib/kubelet/pods/30800a7c-e1e7-4a54-80cc-f563104b4a71/volumes" Nov 25 12:45:30 crc kubenswrapper[4675]: I1125 12:45:30.669645 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nqx9k" event={"ID":"a5922e4e-e904-49f3-8661-d4cfb1dbebd2","Type":"ContainerStarted","Data":"c50b4a4f865e84d2378b4dc7db967c21116952450827df54eb31dbaf87ce67e3"} Nov 25 12:45:30 crc kubenswrapper[4675]: I1125 12:45:30.690255 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-nqx9k" podStartSLOduration=2.282355541 podStartE2EDuration="14.690235137s" podCreationTimestamp="2025-11-25 12:45:16 +0000 UTC" firstStartedPulling="2025-11-25 12:45:17.280604887 +0000 UTC m=+1062.452197228" lastFinishedPulling="2025-11-25 12:45:29.688484493 +0000 UTC m=+1074.860076824" observedRunningTime="2025-11-25 12:45:30.683178841 +0000 UTC m=+1075.854771182" watchObservedRunningTime="2025-11-25 12:45:30.690235137 +0000 UTC m=+1075.861827478" Nov 25 12:45:31 crc kubenswrapper[4675]: I1125 12:45:31.225431 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:31 crc kubenswrapper[4675]: I1125 12:45:31.233307 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/60249dd6-be73-49eb-861a-54bb77652335-etc-swift\") pod \"swift-storage-0\" (UID: \"60249dd6-be73-49eb-861a-54bb77652335\") " pod="openstack/swift-storage-0" Nov 25 12:45:31 crc kubenswrapper[4675]: I1125 
12:45:31.264286 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 12:45:31 crc kubenswrapper[4675]: I1125 12:45:31.811767 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 12:45:31 crc kubenswrapper[4675]: W1125 12:45:31.825123 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60249dd6_be73_49eb_861a_54bb77652335.slice/crio-e6ecf0dc593e91528b297b07eae072ccca89219f5d048d7b3dcbab2fa6198e80 WatchSource:0}: Error finding container e6ecf0dc593e91528b297b07eae072ccca89219f5d048d7b3dcbab2fa6198e80: Status 404 returned error can't find the container with id e6ecf0dc593e91528b297b07eae072ccca89219f5d048d7b3dcbab2fa6198e80 Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.315033 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.633672 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-vtdh7"] Nov 25 12:45:32 crc kubenswrapper[4675]: E1125 12:45:32.633991 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30800a7c-e1e7-4a54-80cc-f563104b4a71" containerName="ovn-config" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.634003 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="30800a7c-e1e7-4a54-80cc-f563104b4a71" containerName="ovn-config" Nov 25 12:45:32 crc kubenswrapper[4675]: E1125 12:45:32.634022 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="959f7b20-344e-4759-8142-19a41f250c72" containerName="swift-ring-rebalance" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.634028 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="959f7b20-344e-4759-8142-19a41f250c72" containerName="swift-ring-rebalance" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.634172 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="959f7b20-344e-4759-8142-19a41f250c72" containerName="swift-ring-rebalance" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.634186 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="30800a7c-e1e7-4a54-80cc-f563104b4a71" containerName="ovn-config" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.634672 4675 util.go:30] "No sandbox for pod can be found. 
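
The cpu_manager/memory_manager pairs above run during admission of the new cinder-db-create pod: any CPU-set or memory assignment still recorded for a container that no longer exists (here the finished ovn-config and swift-ring-rebalance containers) is swept away before resources are handed out. The E-level line and the I-level "Deleted CPUSet assignment" line are two halves of the same sweep. A toy version:

package main

import "fmt"

func main() {
	assignments := map[string]string{ // containerName -> recorded cpuset
		"ovn-config":           "0-1",
		"swift-ring-rebalance": "2-3",
	}
	alive := map[string]bool{} // neither container is running anymore

	for name := range assignments {
		if !alive[name] {
			fmt.Printf("RemoveStaleState: removing container %q\n", name)
			delete(assignments, name)
			fmt.Printf("Deleted CPUSet assignment for %q\n", name)
		}
	}
}
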
Need to start a new one" pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.697209 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vtdh7"] Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.700775 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"e6ecf0dc593e91528b297b07eae072ccca89219f5d048d7b3dcbab2fa6198e80"} Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.727893 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.753954 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thjbj\" (UniqueName: \"kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj\") pod \"cinder-db-create-vtdh7\" (UID: \"db9b274e-70fb-4e9d-bd32-28143b3b00f4\") " pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.855853 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thjbj\" (UniqueName: \"kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj\") pod \"cinder-db-create-vtdh7\" (UID: \"db9b274e-70fb-4e9d-bd32-28143b3b00f4\") " pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.897867 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thjbj\" (UniqueName: \"kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj\") pod \"cinder-db-create-vtdh7\" (UID: \"db9b274e-70fb-4e9d-bd32-28143b3b00f4\") " pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:32 crc kubenswrapper[4675]: I1125 12:45:32.963156 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.032433 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-7kq2m"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.034265 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.075668 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxzl2\" (UniqueName: \"kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2\") pod \"barbican-db-create-7kq2m\" (UID: \"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3\") " pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.128873 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7kq2m"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.186029 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxzl2\" (UniqueName: \"kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2\") pod \"barbican-db-create-7kq2m\" (UID: \"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3\") " pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.248028 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxzl2\" (UniqueName: \"kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2\") pod \"barbican-db-create-7kq2m\" (UID: \"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3\") " pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.412734 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-pbf54"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.413763 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.415706 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.421801 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-sl4fq"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.427417 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-69vb7" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.427642 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.427738 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.429230 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.431126 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.444997 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pbf54"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.480974 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-sl4fq"] Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.492231 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv24q\" (UniqueName: \"kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q\") pod \"neutron-db-create-sl4fq\" (UID: \"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6\") " pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.492299 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnb9h\" (UniqueName: \"kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.492409 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.492449 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.596081 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnb9h\" (UniqueName: \"kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.596232 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.596289 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.596320 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv24q\" (UniqueName: \"kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q\") pod \"neutron-db-create-sl4fq\" (UID: \"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6\") " pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:33 crc 
kubenswrapper[4675]: I1125 12:45:33.605776 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.607169 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.637388 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnb9h\" (UniqueName: \"kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h\") pod \"keystone-db-sync-pbf54\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:33 crc kubenswrapper[4675]: I1125 12:45:33.639872 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv24q\" (UniqueName: \"kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q\") pod \"neutron-db-create-sl4fq\" (UID: \"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6\") " pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:33.763660 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:33.819666 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:33.906321 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vtdh7"] Nov 25 12:45:34 crc kubenswrapper[4675]: W1125 12:45:33.919097 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb9b274e_70fb_4e9d_bd32_28143b3b00f4.slice/crio-8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca WatchSource:0}: Error finding container 8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca: Status 404 returned error can't find the container with id 8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:34.722597 4675 generic.go:334] "Generic (PLEG): container finished" podID="db9b274e-70fb-4e9d-bd32-28143b3b00f4" containerID="f8e444b9e9e360b1918bbc1887ae55d686dedc8d934eb9f76eb42466dbc93553" exitCode=0 Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:34.722917 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vtdh7" event={"ID":"db9b274e-70fb-4e9d-bd32-28143b3b00f4","Type":"ContainerDied","Data":"f8e444b9e9e360b1918bbc1887ae55d686dedc8d934eb9f76eb42466dbc93553"} Nov 25 12:45:34 crc kubenswrapper[4675]: I1125 12:45:34.722977 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vtdh7" event={"ID":"db9b274e-70fb-4e9d-bd32-28143b3b00f4","Type":"ContainerStarted","Data":"8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.332450 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
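
Each entry here carries two timestamps, which is why a line stamped Nov 25 12:45:34 by journald can contain a klog header reading 12:45:33: the journald prefix records delivery time, while the klog header ("I1125 12:45:33.605776 4675 operation_generator.go:637]") records emission time, with I the severity, 1125 the month and day, 4675 the PID, and file:line the call site. A small parser for that header:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^([IWEF])(\d{2})(\d{2}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w.]+:\d+)\]`)
	line := `I1125 12:45:33.605776    4675 operation_generator.go:637]`
	m := re.FindStringSubmatch(line)
	fmt.Printf("severity=%s month=%s day=%s time=%s pid=%s source=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}
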
pods=["openstack/neutron-db-create-sl4fq"] Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.365051 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pbf54"] Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.391926 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7kq2m"] Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.761709 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7kq2m" event={"ID":"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3","Type":"ContainerStarted","Data":"d7097e021163ceb2bd3f382a6ee111d6f42703173966b5f4b7fab5896dee1781"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.774049 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"a8e982b222b26a3af527168e86e11c37bbbd3ba3746055c51eea664bb4e1a0ce"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.774094 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"f5ef0cd1a4cda259653125cd53afbccb3e796d148bde84986103d215c1aa737e"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.777928 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbf54" event={"ID":"d872b841-9c08-4120-880d-1d2803c8e3bd","Type":"ContainerStarted","Data":"a06ea90b247317f97fb262de00469640c97b357d539e9b2f17e1145117fd7b77"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.790035 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sl4fq" event={"ID":"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6","Type":"ContainerStarted","Data":"853760c5e6c1422284de2885c97a93f75c559cf13f3fb457373aa0059258661c"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.790065 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sl4fq" event={"ID":"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6","Type":"ContainerStarted","Data":"da03005c285ed44a16c34f698c20f5a7578acf90d022e45c2e70233ae96442f4"} Nov 25 12:45:35 crc kubenswrapper[4675]: I1125 12:45:35.808309 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-sl4fq" podStartSLOduration=2.808290522 podStartE2EDuration="2.808290522s" podCreationTimestamp="2025-11-25 12:45:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:35.807870599 +0000 UTC m=+1080.979462940" watchObservedRunningTime="2025-11-25 12:45:35.808290522 +0000 UTC m=+1080.979882863" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.364070 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.480890 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thjbj\" (UniqueName: \"kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj\") pod \"db9b274e-70fb-4e9d-bd32-28143b3b00f4\" (UID: \"db9b274e-70fb-4e9d-bd32-28143b3b00f4\") " Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.493926 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj" (OuterVolumeSpecName: "kube-api-access-thjbj") pod "db9b274e-70fb-4e9d-bd32-28143b3b00f4" (UID: "db9b274e-70fb-4e9d-bd32-28143b3b00f4"). InnerVolumeSpecName "kube-api-access-thjbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.583093 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thjbj\" (UniqueName: \"kubernetes.io/projected/db9b274e-70fb-4e9d-bd32-28143b3b00f4-kube-api-access-thjbj\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.799547 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"c60d6d7d6ddf6219aadc9f5dd82ca5271ce9f549519e4b9d6f07cb94324153d5"} Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.799917 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"fd06496ecf7bde1e6cdbf8447ad63bc17d4bed1482b2d3a99bd6b70b1b6c80f8"} Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.801930 4675 generic.go:334] "Generic (PLEG): container finished" podID="3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" containerID="853760c5e6c1422284de2885c97a93f75c559cf13f3fb457373aa0059258661c" exitCode=0 Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.802036 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sl4fq" event={"ID":"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6","Type":"ContainerDied","Data":"853760c5e6c1422284de2885c97a93f75c559cf13f3fb457373aa0059258661c"} Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.803412 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vtdh7" event={"ID":"db9b274e-70fb-4e9d-bd32-28143b3b00f4","Type":"ContainerDied","Data":"8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca"} Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.803455 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f27a58089e9a07553587d65c3a5f845fef5bb9aa2dcbd2db3668c2eb18b2cca" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.803516 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-vtdh7" Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.807284 4675 generic.go:334] "Generic (PLEG): container finished" podID="094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" containerID="ec5200d7880a086a872e91be8529cb051438d66d79987b8c7cf6a97bb92605ac" exitCode=0 Nov 25 12:45:36 crc kubenswrapper[4675]: I1125 12:45:36.807327 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7kq2m" event={"ID":"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3","Type":"ContainerDied","Data":"ec5200d7880a086a872e91be8529cb051438d66d79987b8c7cf6a97bb92605ac"} Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.410363 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.416953 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.509794 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv24q\" (UniqueName: \"kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q\") pod \"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6\" (UID: \"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6\") " Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.510024 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxzl2\" (UniqueName: \"kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2\") pod \"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3\" (UID: \"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3\") " Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.519243 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q" (OuterVolumeSpecName: "kube-api-access-lv24q") pod "3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" (UID: "3fb050e8-6d74-4c4e-aa11-a5e86f109fb6"). InnerVolumeSpecName "kube-api-access-lv24q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.522224 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2" (OuterVolumeSpecName: "kube-api-access-jxzl2") pod "094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" (UID: "094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3"). InnerVolumeSpecName "kube-api-access-jxzl2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.628384 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv24q\" (UniqueName: \"kubernetes.io/projected/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6-kube-api-access-lv24q\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.628634 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxzl2\" (UniqueName: \"kubernetes.io/projected/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3-kube-api-access-jxzl2\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.825897 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sl4fq" event={"ID":"3fb050e8-6d74-4c4e-aa11-a5e86f109fb6","Type":"ContainerDied","Data":"da03005c285ed44a16c34f698c20f5a7578acf90d022e45c2e70233ae96442f4"} Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.825942 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da03005c285ed44a16c34f698c20f5a7578acf90d022e45c2e70233ae96442f4" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.826010 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-sl4fq" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.829501 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7kq2m" event={"ID":"094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3","Type":"ContainerDied","Data":"d7097e021163ceb2bd3f382a6ee111d6f42703173966b5f4b7fab5896dee1781"} Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.829532 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7097e021163ceb2bd3f382a6ee111d6f42703173966b5f4b7fab5896dee1781" Nov 25 12:45:38 crc kubenswrapper[4675]: I1125 12:45:38.829554 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-7kq2m" Nov 25 12:45:41 crc kubenswrapper[4675]: I1125 12:45:41.855914 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"91de94768a76ed8c9fbe8450c06c550575c1154a726c009b03a829cac02003d8"} Nov 25 12:45:41 crc kubenswrapper[4675]: I1125 12:45:41.857567 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbf54" event={"ID":"d872b841-9c08-4120-880d-1d2803c8e3bd","Type":"ContainerStarted","Data":"51ba83037e6556e175eacbf5a835e0dbab44b348701b90877519b4d7c93501c3"} Nov 25 12:45:41 crc kubenswrapper[4675]: I1125 12:45:41.884686 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-pbf54" podStartSLOduration=2.7398004929999997 podStartE2EDuration="8.884654158s" podCreationTimestamp="2025-11-25 12:45:33 +0000 UTC" firstStartedPulling="2025-11-25 12:45:35.400938395 +0000 UTC m=+1080.572530736" lastFinishedPulling="2025-11-25 12:45:41.54579206 +0000 UTC m=+1086.717384401" observedRunningTime="2025-11-25 12:45:41.882978923 +0000 UTC m=+1087.054571264" watchObservedRunningTime="2025-11-25 12:45:41.884654158 +0000 UTC m=+1087.056246499" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.805625 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-2b6e-account-create-5pb28"] Nov 25 12:45:42 crc kubenswrapper[4675]: E1125 12:45:42.806316 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9b274e-70fb-4e9d-bd32-28143b3b00f4" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806335 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9b274e-70fb-4e9d-bd32-28143b3b00f4" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: E1125 12:45:42.806358 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806366 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: E1125 12:45:42.806381 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806388 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806579 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806616 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.806640 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="db9b274e-70fb-4e9d-bd32-28143b3b00f4" containerName="mariadb-database-create" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.807529 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.810206 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.864190 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-2b6e-account-create-5pb28"] Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.877349 4675 generic.go:334] "Generic (PLEG): container finished" podID="a5922e4e-e904-49f3-8661-d4cfb1dbebd2" containerID="c50b4a4f865e84d2378b4dc7db967c21116952450827df54eb31dbaf87ce67e3" exitCode=0 Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.877424 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nqx9k" event={"ID":"a5922e4e-e904-49f3-8661-d4cfb1dbebd2","Type":"ContainerDied","Data":"c50b4a4f865e84d2378b4dc7db967c21116952450827df54eb31dbaf87ce67e3"} Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.881783 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"e07bff306f9a25fcf27fb0a024fe7c1170b5ef76d7c9231fb276ad5af37f1588"} Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.881851 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"401a45809166d2683480469e670553f6561ed2982fa947b6667068ceb95782dd"} Nov 25 12:45:42 crc kubenswrapper[4675]: I1125 12:45:42.903200 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cg84\" (UniqueName: \"kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84\") pod \"cinder-2b6e-account-create-5pb28\" (UID: \"398aafc4-3b57-4573-a72a-1b71b3a79383\") " pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.004934 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cg84\" (UniqueName: \"kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84\") pod \"cinder-2b6e-account-create-5pb28\" (UID: \"398aafc4-3b57-4573-a72a-1b71b3a79383\") " pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.028534 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cg84\" (UniqueName: \"kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84\") pod \"cinder-2b6e-account-create-5pb28\" (UID: \"398aafc4-3b57-4573-a72a-1b71b3a79383\") " pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.139549 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.580952 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-2b6e-account-create-5pb28"] Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.662375 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.662437 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.891706 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"608824ac7a90d9b9db25282debbfeec270d7307648031a2b36f7f784d8a9e5b2"} Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.894174 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2b6e-account-create-5pb28" event={"ID":"398aafc4-3b57-4573-a72a-1b71b3a79383","Type":"ContainerStarted","Data":"c2500178a20e65a654904b8164b1d955d20d4445d51534a1ce4d5d7f655c58ac"} Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.894198 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2b6e-account-create-5pb28" event={"ID":"398aafc4-3b57-4573-a72a-1b71b3a79383","Type":"ContainerStarted","Data":"a1d7a9d3a805fbd8f534a51b3ad83de647bf147e8a3295be2570401ef3a059d3"} Nov 25 12:45:43 crc kubenswrapper[4675]: I1125 12:45:43.907601 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-2b6e-account-create-5pb28" podStartSLOduration=1.907580751 podStartE2EDuration="1.907580751s" podCreationTimestamp="2025-11-25 12:45:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:43.907123866 +0000 UTC m=+1089.078716227" watchObservedRunningTime="2025-11-25 12:45:43.907580751 +0000 UTC m=+1089.079173092" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.298447 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.326883 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle\") pod \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.327080 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data\") pod \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.327269 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mczn8\" (UniqueName: \"kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8\") pod \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.327455 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data\") pod \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\" (UID: \"a5922e4e-e904-49f3-8661-d4cfb1dbebd2\") " Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.332896 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a5922e4e-e904-49f3-8661-d4cfb1dbebd2" (UID: "a5922e4e-e904-49f3-8661-d4cfb1dbebd2"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.336321 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8" (OuterVolumeSpecName: "kube-api-access-mczn8") pod "a5922e4e-e904-49f3-8661-d4cfb1dbebd2" (UID: "a5922e4e-e904-49f3-8661-d4cfb1dbebd2"). InnerVolumeSpecName "kube-api-access-mczn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.361074 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5922e4e-e904-49f3-8661-d4cfb1dbebd2" (UID: "a5922e4e-e904-49f3-8661-d4cfb1dbebd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.371446 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data" (OuterVolumeSpecName: "config-data") pod "a5922e4e-e904-49f3-8661-d4cfb1dbebd2" (UID: "a5922e4e-e904-49f3-8661-d4cfb1dbebd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.434040 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mczn8\" (UniqueName: \"kubernetes.io/projected/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-kube-api-access-mczn8\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.434069 4675 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.434079 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.434088 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5922e4e-e904-49f3-8661-d4cfb1dbebd2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.908681 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nqx9k" event={"ID":"a5922e4e-e904-49f3-8661-d4cfb1dbebd2","Type":"ContainerDied","Data":"30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26"} Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.908722 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30bb51a999b342ff3316afd58fefd6997909183d7a0a8d91fc9c9d84b0824b26" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.908813 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nqx9k" Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.910345 4675 generic.go:334] "Generic (PLEG): container finished" podID="398aafc4-3b57-4573-a72a-1b71b3a79383" containerID="c2500178a20e65a654904b8164b1d955d20d4445d51534a1ce4d5d7f655c58ac" exitCode=0 Nov 25 12:45:44 crc kubenswrapper[4675]: I1125 12:45:44.910382 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2b6e-account-create-5pb28" event={"ID":"398aafc4-3b57-4573-a72a-1b71b3a79383","Type":"ContainerDied","Data":"c2500178a20e65a654904b8164b1d955d20d4445d51534a1ce4d5d7f655c58ac"} Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.605568 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:45 crc kubenswrapper[4675]: E1125 12:45:45.606352 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5922e4e-e904-49f3-8661-d4cfb1dbebd2" containerName="glance-db-sync" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.606375 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5922e4e-e904-49f3-8661-d4cfb1dbebd2" containerName="glance-db-sync" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.606583 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5922e4e-e904-49f3-8661-d4cfb1dbebd2" containerName="glance-db-sync" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.607691 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.611408 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.660092 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.660306 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2g5j\" (UniqueName: \"kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.660387 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.660483 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.660690 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.762531 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.762622 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.762674 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.762690 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-l2g5j\" (UniqueName: \"kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.762711 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.763535 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.763646 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.764161 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.764380 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.802481 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2g5j\" (UniqueName: \"kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j\") pod \"dnsmasq-dns-5b946c75cc-6vb8z\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:45 crc kubenswrapper[4675]: I1125 12:45:45.929775 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:46 crc kubenswrapper[4675]: W1125 12:45:46.571203 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98adc754_8753_494a_8083_0d98351c876b.slice/crio-68b8c6a47d91f8614abfc636db8d08ecf29c7b461f9d8d7f9b9b628836cd3ef0 WatchSource:0}: Error finding container 68b8c6a47d91f8614abfc636db8d08ecf29c7b461f9d8d7f9b9b628836cd3ef0: Status 404 returned error can't find the container with id 68b8c6a47d91f8614abfc636db8d08ecf29c7b461f9d8d7f9b9b628836cd3ef0 Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.572685 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.612321 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.680379 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cg84\" (UniqueName: \"kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84\") pod \"398aafc4-3b57-4573-a72a-1b71b3a79383\" (UID: \"398aafc4-3b57-4573-a72a-1b71b3a79383\") " Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.687229 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84" (OuterVolumeSpecName: "kube-api-access-7cg84") pod "398aafc4-3b57-4573-a72a-1b71b3a79383" (UID: "398aafc4-3b57-4573-a72a-1b71b3a79383"). InnerVolumeSpecName "kube-api-access-7cg84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.782530 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cg84\" (UniqueName: \"kubernetes.io/projected/398aafc4-3b57-4573-a72a-1b71b3a79383-kube-api-access-7cg84\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.925082 4675 generic.go:334] "Generic (PLEG): container finished" podID="98adc754-8753-494a-8083-0d98351c876b" containerID="c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec" exitCode=0 Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.925315 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" event={"ID":"98adc754-8753-494a-8083-0d98351c876b","Type":"ContainerDied","Data":"c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec"} Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.925462 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" event={"ID":"98adc754-8753-494a-8083-0d98351c876b","Type":"ContainerStarted","Data":"68b8c6a47d91f8614abfc636db8d08ecf29c7b461f9d8d7f9b9b628836cd3ef0"} Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.927307 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2b6e-account-create-5pb28" Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.927329 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2b6e-account-create-5pb28" event={"ID":"398aafc4-3b57-4573-a72a-1b71b3a79383","Type":"ContainerDied","Data":"a1d7a9d3a805fbd8f534a51b3ad83de647bf147e8a3295be2570401ef3a059d3"} Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.927363 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1d7a9d3a805fbd8f534a51b3ad83de647bf147e8a3295be2570401ef3a059d3" Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.935883 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"9a1d988eb2fcef17a44a366c50bd62c6edf5bda9f5dc9c5ec7d842a24bead8af"} Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.935920 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"ad6b059e7bfcdbabef786e9ca32220b631c768612e3c6b2e4f2e7d9eaa53fd6d"} Nov 25 12:45:46 crc kubenswrapper[4675]: I1125 12:45:46.935929 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"c2af96c7a1f88753c82401d50b7f76f6380a16ad1d1f405f72f9db81aee7dedf"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.951223 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"07166e4d6cc86be1f34409a716d1948972ebd93ab763770ec0e453586b2562c5"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.951557 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"1429cc62b92cbd78ae7891dd9d692ecdd36b31a7249d880d2e00c73bd5edb482"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.951574 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"869e0c8e67f62e2fa8c6805fdd76c01c25c6c7a45631ea21d64e7d5cc94e05dc"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.951586 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"60249dd6-be73-49eb-861a-54bb77652335","Type":"ContainerStarted","Data":"94309e474ed5efbd38704f3274e12022243672bbd90e727d7c79257e231f0b00"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.954300 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" event={"ID":"98adc754-8753-494a-8083-0d98351c876b","Type":"ContainerStarted","Data":"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e"} Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.954473 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:47 crc kubenswrapper[4675]: I1125 12:45:47.990596 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.09878925 podStartE2EDuration="49.990580202s" podCreationTimestamp="2025-11-25 12:44:58 +0000 UTC" firstStartedPulling="2025-11-25 12:45:31.82761774 
+0000 UTC m=+1076.999210081" lastFinishedPulling="2025-11-25 12:45:45.719408692 +0000 UTC m=+1090.891001033" observedRunningTime="2025-11-25 12:45:47.981994816 +0000 UTC m=+1093.153587167" watchObservedRunningTime="2025-11-25 12:45:47.990580202 +0000 UTC m=+1093.162172543" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.015589 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" podStartSLOduration=3.015566795 podStartE2EDuration="3.015566795s" podCreationTimestamp="2025-11-25 12:45:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:48.00670133 +0000 UTC m=+1093.178293661" watchObservedRunningTime="2025-11-25 12:45:48.015566795 +0000 UTC m=+1093.187159136" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.289228 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.323411 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:48 crc kubenswrapper[4675]: E1125 12:45:48.323805 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="398aafc4-3b57-4573-a72a-1b71b3a79383" containerName="mariadb-account-create" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.323859 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="398aafc4-3b57-4573-a72a-1b71b3a79383" containerName="mariadb-account-create" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.324023 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="398aafc4-3b57-4573-a72a-1b71b3a79383" containerName="mariadb-account-create" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.324937 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.327732 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.341716 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506299 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvfp4\" (UniqueName: \"kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506344 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506362 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506595 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506731 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.506792 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608510 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvfp4\" (UniqueName: \"kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608573 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: 
\"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608595 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608673 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608726 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.608759 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.609657 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.609693 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.609710 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.609864 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.609877 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: 
I1125 12:45:48.639056 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvfp4\" (UniqueName: \"kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4\") pod \"dnsmasq-dns-74f6bcbc87-xcvr9\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.651690 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.964394 4675 generic.go:334] "Generic (PLEG): container finished" podID="d872b841-9c08-4120-880d-1d2803c8e3bd" containerID="51ba83037e6556e175eacbf5a835e0dbab44b348701b90877519b4d7c93501c3" exitCode=0 Nov 25 12:45:48 crc kubenswrapper[4675]: I1125 12:45:48.965191 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbf54" event={"ID":"d872b841-9c08-4120-880d-1d2803c8e3bd","Type":"ContainerDied","Data":"51ba83037e6556e175eacbf5a835e0dbab44b348701b90877519b4d7c93501c3"} Nov 25 12:45:49 crc kubenswrapper[4675]: I1125 12:45:49.101730 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:49 crc kubenswrapper[4675]: I1125 12:45:49.974897 4675 generic.go:334] "Generic (PLEG): container finished" podID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerID="23a392802ed3ecd94db738cbf963af76d9ab90471c90a72fb467074c61bdc14f" exitCode=0 Nov 25 12:45:49 crc kubenswrapper[4675]: I1125 12:45:49.975006 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" event={"ID":"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef","Type":"ContainerDied","Data":"23a392802ed3ecd94db738cbf963af76d9ab90471c90a72fb467074c61bdc14f"} Nov 25 12:45:49 crc kubenswrapper[4675]: I1125 12:45:49.975342 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" event={"ID":"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef","Type":"ContainerStarted","Data":"1ada2aa3ba143b26c2ae490c784f8996b3f524a60468a135a64f3d7789a280b2"} Nov 25 12:45:49 crc kubenswrapper[4675]: I1125 12:45:49.975557 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="dnsmasq-dns" containerID="cri-o://872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e" gracePeriod=10 Nov 25 12:45:50 crc kubenswrapper[4675]: E1125 12:45:50.189935 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98adc754_8753_494a_8083_0d98351c876b.slice/crio-872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e.scope\": RecentStats: unable to find data in memory cache]" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.474569 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.482153 4675 util.go:48] "No ready sandbox for pod can be found. 
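Note: this is the DNS pod rollover. The replacement dnsmasq-dns-74f6bcbc87-xcvr9 finishes its init phase, and only then does the kubelet stop the old 5b946c75cc pod's container with "Killing container with a grace period ... gracePeriod=10" (standard kubelet semantics, stated here as background rather than read from this log: a stop signal first, with up to 10 s before a forced kill). The cadvisor "Partial failure ... RecentStats: unable to find data in memory cache" that follows is the stats provider querying a cgroup that is already being torn down, and appears benign here.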
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.646256 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data\") pod \"d872b841-9c08-4120-880d-1d2803c8e3bd\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.646309 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb\") pod \"98adc754-8753-494a-8083-0d98351c876b\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.646384 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2g5j\" (UniqueName: \"kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j\") pod \"98adc754-8753-494a-8083-0d98351c876b\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.646439 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb\") pod \"98adc754-8753-494a-8083-0d98351c876b\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.646466 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnb9h\" (UniqueName: \"kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h\") pod \"d872b841-9c08-4120-880d-1d2803c8e3bd\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.647074 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config\") pod \"98adc754-8753-494a-8083-0d98351c876b\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.647368 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle\") pod \"d872b841-9c08-4120-880d-1d2803c8e3bd\" (UID: \"d872b841-9c08-4120-880d-1d2803c8e3bd\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.647391 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc\") pod \"98adc754-8753-494a-8083-0d98351c876b\" (UID: \"98adc754-8753-494a-8083-0d98351c876b\") " Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.651725 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h" (OuterVolumeSpecName: "kube-api-access-mnb9h") pod "d872b841-9c08-4120-880d-1d2803c8e3bd" (UID: "d872b841-9c08-4120-880d-1d2803c8e3bd"). InnerVolumeSpecName "kube-api-access-mnb9h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.651956 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j" (OuterVolumeSpecName: "kube-api-access-l2g5j") pod "98adc754-8753-494a-8083-0d98351c876b" (UID: "98adc754-8753-494a-8083-0d98351c876b"). InnerVolumeSpecName "kube-api-access-l2g5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.679834 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d872b841-9c08-4120-880d-1d2803c8e3bd" (UID: "d872b841-9c08-4120-880d-1d2803c8e3bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.696048 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "98adc754-8753-494a-8083-0d98351c876b" (UID: "98adc754-8753-494a-8083-0d98351c876b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.696211 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config" (OuterVolumeSpecName: "config") pod "98adc754-8753-494a-8083-0d98351c876b" (UID: "98adc754-8753-494a-8083-0d98351c876b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.699566 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "98adc754-8753-494a-8083-0d98351c876b" (UID: "98adc754-8753-494a-8083-0d98351c876b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.700411 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "98adc754-8753-494a-8083-0d98351c876b" (UID: "98adc754-8753-494a-8083-0d98351c876b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.700493 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data" (OuterVolumeSpecName: "config-data") pod "d872b841-9c08-4120-880d-1d2803c8e3bd" (UID: "d872b841-9c08-4120-880d-1d2803c8e3bd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752477 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752527 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752539 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d872b841-9c08-4120-880d-1d2803c8e3bd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752548 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752556 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2g5j\" (UniqueName: \"kubernetes.io/projected/98adc754-8753-494a-8083-0d98351c876b-kube-api-access-l2g5j\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752567 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752577 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnb9h\" (UniqueName: \"kubernetes.io/projected/d872b841-9c08-4120-880d-1d2803c8e3bd-kube-api-access-mnb9h\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.752586 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98adc754-8753-494a-8083-0d98351c876b-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.985502 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" event={"ID":"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef","Type":"ContainerStarted","Data":"bed61455847031621f05af83499945d1ce1af8215b5907c6b7ccfd15ae0ffbb8"} Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.985620 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.988891 4675 generic.go:334] "Generic (PLEG): container finished" podID="98adc754-8753-494a-8083-0d98351c876b" containerID="872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e" exitCode=0 Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.988946 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.988978 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" event={"ID":"98adc754-8753-494a-8083-0d98351c876b","Type":"ContainerDied","Data":"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e"} Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.989019 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-6vb8z" event={"ID":"98adc754-8753-494a-8083-0d98351c876b","Type":"ContainerDied","Data":"68b8c6a47d91f8614abfc636db8d08ecf29c7b461f9d8d7f9b9b628836cd3ef0"} Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.989039 4675 scope.go:117] "RemoveContainer" containerID="872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.991892 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbf54" event={"ID":"d872b841-9c08-4120-880d-1d2803c8e3bd","Type":"ContainerDied","Data":"a06ea90b247317f97fb262de00469640c97b357d539e9b2f17e1145117fd7b77"} Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.991918 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a06ea90b247317f97fb262de00469640c97b357d539e9b2f17e1145117fd7b77" Nov 25 12:45:50 crc kubenswrapper[4675]: I1125 12:45:50.991953 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbf54" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.023599 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" podStartSLOduration=3.023577837 podStartE2EDuration="3.023577837s" podCreationTimestamp="2025-11-25 12:45:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:51.014068552 +0000 UTC m=+1096.185660903" watchObservedRunningTime="2025-11-25 12:45:51.023577837 +0000 UTC m=+1096.195170178" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.041394 4675 scope.go:117] "RemoveContainer" containerID="c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.056718 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.062332 4675 scope.go:117] "RemoveContainer" containerID="872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e" Nov 25 12:45:51 crc kubenswrapper[4675]: E1125 12:45:51.062674 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e\": container with ID starting with 872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e not found: ID does not exist" containerID="872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.062702 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e"} err="failed to get container status \"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e\": rpc error: code = NotFound desc = could not find container 
\"872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e\": container with ID starting with 872f6d1f7d184d33cf6ac47e45fff992807acf5c62b8a96c7702635d170c253e not found: ID does not exist" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.062720 4675 scope.go:117] "RemoveContainer" containerID="c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec" Nov 25 12:45:51 crc kubenswrapper[4675]: E1125 12:45:51.062918 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec\": container with ID starting with c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec not found: ID does not exist" containerID="c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.062941 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec"} err="failed to get container status \"c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec\": rpc error: code = NotFound desc = could not find container \"c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec\": container with ID starting with c4bcc9fb7c2951c09b415d0690c01b0f496f8ffc8ba1c5cf2ea347ef11695dec not found: ID does not exist" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.064743 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-6vb8z"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.305898 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.315993 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7hcbf"] Nov 25 12:45:51 crc kubenswrapper[4675]: E1125 12:45:51.316397 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d872b841-9c08-4120-880d-1d2803c8e3bd" containerName="keystone-db-sync" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.316414 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="d872b841-9c08-4120-880d-1d2803c8e3bd" containerName="keystone-db-sync" Nov 25 12:45:51 crc kubenswrapper[4675]: E1125 12:45:51.316430 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="init" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.316435 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="init" Nov 25 12:45:51 crc kubenswrapper[4675]: E1125 12:45:51.316453 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="dnsmasq-dns" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.316459 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="dnsmasq-dns" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.316624 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="98adc754-8753-494a-8083-0d98351c876b" containerName="dnsmasq-dns" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.316650 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="d872b841-9c08-4120-880d-1d2803c8e3bd" containerName="keystone-db-sync" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.317220 4675 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.320888 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.321119 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.321252 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-69vb7" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.321393 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.342428 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7hcbf"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.371853 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.373519 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.413599 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476449 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476529 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476561 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476584 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnn2l\" (UniqueName: \"kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476633 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x5ms\" (UniqueName: \"kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 
12:45:51.476700 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476731 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476756 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476781 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476844 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476957 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.476997 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.551152 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98adc754-8753-494a-8083-0d98351c876b" path="/var/lib/kubelet/pods/98adc754-8753-494a-8083-0d98351c876b/volumes" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.563589 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-rxzpl"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.571012 4675 util.go:30] "No sandbox for pod can be found. 
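
The reconciler_common entries above are the volume manager's desired-vs-actual diff at work: volumes of the newly added pods enter the attach/mount pipeline, while the orphaned volumes dir of the deleted pod 98adc754... is cleaned up. A toy version of that diff (illustrative names, not kubelet API):

// volume_reconcile.go - volumes only in the desired world get mounted,
// volumes only in the actual world get unmounted.
package main

import "fmt"

func reconcile(desired, actual map[string]bool) (mount, unmount []string) {
	for v := range desired {
		if !actual[v] {
			mount = append(mount, v)
		}
	}
	for v := range actual {
		if !desired[v] {
			unmount = append(unmount, v)
		}
	}
	return mount, unmount
}

func main() {
	desired := map[string]bool{"config": true, "dns-svc": true, "kube-api-access-hnn2l": true}
	actual := map[string]bool{"kube-api-access-l2g5j": true} // left over from the deleted pod
	mount, unmount := reconcile(desired, actual)
	fmt.Println("mount:", mount, "unmount:", unmount)
}

In the real kubelet the "desired" side comes from the pod specs seen by SyncLoop ADD/UPDATE, and the "actual" side from the mount state on disk.
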
Need to start a new one" pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.577612 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578068 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578105 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578131 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578157 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578198 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578217 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578235 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnn2l\" (UniqueName: \"kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578270 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x5ms\" (UniqueName: \"kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578318 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578337 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578352 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.578367 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.580782 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.581979 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.582003 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.582016 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.582481 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.598302 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " 
pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.599236 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.635065 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.635127 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.635145 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bxzgq" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.655553 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.655861 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.657471 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x5ms\" (UniqueName: \"kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms\") pod \"keystone-bootstrap-7hcbf\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.668653 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.669530 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-rxzpl"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682831 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682870 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682897 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8sgr\" (UniqueName: \"kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682919 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682959 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.682982 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.706638 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnn2l\" (UniqueName: \"kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l\") pod \"dnsmasq-dns-847c4cc679-wtpzk\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.710979 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.712336 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.720241 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.720579 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.720708 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.720786 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.729457 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-t8rlm" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.786321 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.787867 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.787911 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.787944 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkczl\" (UniqueName: \"kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.787983 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788012 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-config-data\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788036 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788063 4675 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788098 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8sgr\" (UniqueName: \"kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788127 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788157 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.788188 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.790183 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.821014 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.821176 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.824347 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.826315 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 
12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.851321 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8sgr\" (UniqueName: \"kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr\") pod \"cinder-db-sync-rxzpl\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.892468 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.893212 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.893719 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.893767 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkczl\" (UniqueName: \"kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.893851 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-config-data\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.893879 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.901583 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.901698 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.901888 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.902069 
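
The reflector.go "Caches populated" lines mark the point where a local watch cache holds a full copy of an object (the "horizon" Secret, its config maps, and so on), so subsequent volume setup reads it without another API-server round trip. A toy cache with the same replace-then-read lifecycle (not client-go's implementation):

// reflector_cache.go - populate once from a LIST, then serve reads locally.
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

// Replace installs the initial LIST result wholesale.
func (c *cache) Replace(items map[string][]byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = items
}

// Get is a local read; no API call happens here.
func (c *cache) Get(name string) ([]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[name]
	return v, ok
}

func main() {
	c := &cache{}
	c.Replace(map[string][]byte{"horizon-secret-key": []byte("opaque payload")})
	v, ok := c.Get("horizon-secret-key")
	fmt.Println(ok, len(v))
}

In client-go the same role is played by a Reflector feeding an indexer store; the log line is emitted once the initial list has landed.
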
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.903869 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.922300 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.938017 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-756fb5694f-vqml4"]
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.939983 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.945288 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.966307 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkczl\" (UniqueName: \"kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl\") pod \"horizon-66d446568f-7dtvh\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " pod="openstack/horizon-66d446568f-7dtvh"
Nov 25 12:45:51 crc kubenswrapper[4675]: I1125 12:45:51.969595 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.099396 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-rxzpl"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.153828 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-756fb5694f-vqml4"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.207568 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxv5v\" (UniqueName: \"kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.207692 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.207759 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.207862 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.207982 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98q44\" (UniqueName: \"kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208036 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208061 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208142 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208194 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
\"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208235 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208255 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.208289 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.215616 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.304375 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ld5wn"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309646 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309723 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98q44\" (UniqueName: \"kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309764 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309788 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309837 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309867 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309898 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309919 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309941 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.309964 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxv5v\" (UniqueName: \"kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.310004 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.310025 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.313759 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.319390 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.319938 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4" 
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.320206 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.320365 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.320892 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.322444 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld5wn"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.346506 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.347049 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hdnd8"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.347156 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.370331 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.372559 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.373770 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxv5v\" (UniqueName: \"kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v\") pod \"horizon-756fb5694f-vqml4\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " pod="openstack/horizon-756fb5694f-vqml4"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.387787 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ld5wn"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.399111 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.400523 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.402709 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.407362 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.407444 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.407527 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.407626 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-zb75l"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.407765 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.444193 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.449998 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98q44\" (UniqueName: \"kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44\") pod \"ceilometer-0\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " pod="openstack/ceilometer-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.500277 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519687 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519735 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpqnh\" (UniqueName: \"kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519835 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519857 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519921 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519940 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.519980 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520007 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520033 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0"
Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520082 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn"
started for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520141 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520181 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.520207 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.530885 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.532637 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.556866 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.604904 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621421 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621480 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621508 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621567 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621590 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621620 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621648 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621685 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc 
kubenswrapper[4675]: I1125 12:45:52.621731 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621753 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621792 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmbbf\" (UniqueName: \"kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621833 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621867 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621897 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621921 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpqnh\" (UniqueName: \"kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.621983 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.622008 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.622035 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.623357 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.624079 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.624341 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.624424 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.654334 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.655124 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.659274 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.665331 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.665777 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.670559 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.671403 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.672203 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.676803 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data\") pod \"placement-db-sync-ld5wn\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.694871 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpqnh\" (UniqueName: \"kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.724953 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.725032 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.725073 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmbbf\" (UniqueName: \"kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.725189 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc 
kubenswrapper[4675]: I1125 12:45:52.725230 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.725267 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.726241 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.740184 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.745729 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld5wn" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.748559 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.759121 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.759190 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.762186 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.762588 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.770372 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.773313 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.777041 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.777264 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.780254 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmbbf\" (UniqueName: \"kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf\") pod \"dnsmasq-dns-785d8bcb8c-cl8c2\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.802307 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.902304 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7hcbf"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.907632 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.932928 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933081 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58dcl\" (UniqueName: \"kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933120 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933158 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933185 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933262 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933308 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.933372 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.958585 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-7322-account-create-hwqkt"] Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.960100 4675 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:52 crc kubenswrapper[4675]: I1125 12:45:52.964170 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.026754 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7322-account-create-hwqkt"] Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.035763 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.036246 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.036404 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.036530 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.036718 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58dcl\" (UniqueName: \"kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.038292 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.038776 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.038956 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 
12:45:53.039927 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.042640 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.042910 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.052570 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.061306 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.064462 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.070044 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.071332 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58dcl\" (UniqueName: \"kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.144237 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5bgb\" (UniqueName: \"kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb\") pod \"barbican-7322-account-create-hwqkt\" (UID: \"2ac2c87f-984d-4561-a825-b3b25be4e078\") " pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.153973 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.164389 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="dnsmasq-dns" containerID="cri-o://bed61455847031621f05af83499945d1ce1af8215b5907c6b7ccfd15ae0ffbb8" gracePeriod=10 Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.164662 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7hcbf" event={"ID":"8e7accff-b51a-446a-adbc-26c5fcd8263d","Type":"ContainerStarted","Data":"9a89acbac25202b328ac8741e475de992786fae531688a19b25825c826e531b2"} Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.220886 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-78c5-account-create-64jfv"] Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.236137 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78c5-account-create-64jfv"] Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.236461 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.247575 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5bgb\" (UniqueName: \"kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb\") pod \"barbican-7322-account-create-hwqkt\" (UID: \"2ac2c87f-984d-4561-a825-b3b25be4e078\") " pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.248038 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.309192 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5bgb\" (UniqueName: \"kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb\") pod \"barbican-7322-account-create-hwqkt\" (UID: \"2ac2c87f-984d-4561-a825-b3b25be4e078\") " pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.349762 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckzgd\" (UniqueName: \"kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd\") pod \"neutron-78c5-account-create-64jfv\" (UID: \"49a0347f-7b9b-461f-a5d7-1c803263ba15\") " pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.373377 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.401727 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"] Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.451241 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckzgd\" (UniqueName: \"kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd\") pod \"neutron-78c5-account-create-64jfv\" (UID: \"49a0347f-7b9b-461f-a5d7-1c803263ba15\") " pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.459630 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.472882 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckzgd\" (UniqueName: \"kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd\") pod \"neutron-78c5-account-create-64jfv\" (UID: \"49a0347f-7b9b-461f-a5d7-1c803263ba15\") " pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.496842 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.600562 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:45:53 crc kubenswrapper[4675]: I1125 12:45:53.930076 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-756fb5694f-vqml4"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.015622 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-rxzpl"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.140227 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ld5wn"] Nov 25 12:45:54 crc kubenswrapper[4675]: W1125 12:45:54.165953 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc02c331_c8e4_4c4a_864f_54fe5f391fad.slice/crio-1158e720ae0b8a45fec93ed17617cd8da1d509981abe220aae4cce3766036133 WatchSource:0}: Error finding container 1158e720ae0b8a45fec93ed17617cd8da1d509981abe220aae4cce3766036133: Status 404 returned error can't find the container with id 1158e720ae0b8a45fec93ed17617cd8da1d509981abe220aae4cce3766036133 Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.166231 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:45:54 crc kubenswrapper[4675]: W1125 12:45:54.172999 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeba420bb_9044_4e38_bcd8_11e51c903cac.slice/crio-a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e WatchSource:0}: Error finding container a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e: Status 404 returned error can't find the container with id a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.234072 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-756fb5694f-vqml4" 
event={"ID":"32e0bbdb-534b-444c-850b-fb9257b211d7","Type":"ContainerStarted","Data":"10e00ccb1a0a2371e1fc553b64ce3d00ca03eae208a28d5d8d736b51b4201927"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.355288 4675 generic.go:334] "Generic (PLEG): container finished" podID="1eb4af57-d386-4309-bbf5-33704d4c5f4d" containerID="a1299627a88f7ad94055be7a2e06ca470568df91e46345aa65cf0c01464dc49e" exitCode=0 Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.356301 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" event={"ID":"1eb4af57-d386-4309-bbf5-33704d4c5f4d","Type":"ContainerDied","Data":"a1299627a88f7ad94055be7a2e06ca470568df91e46345aa65cf0c01464dc49e"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.356333 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" event={"ID":"1eb4af57-d386-4309-bbf5-33704d4c5f4d","Type":"ContainerStarted","Data":"f09f19b57c12ada13fe4f6ea5cc3b6d67c82fee89a7dfd10511d2f980f0a07f7"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.382521 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.399953 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7hcbf" event={"ID":"8e7accff-b51a-446a-adbc-26c5fcd8263d","Type":"ContainerStarted","Data":"57348e83ac2f41ec1acdd7c66c68f0633c636d052d1dc0604ad3155f4af05fda"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.422062 4675 generic.go:334] "Generic (PLEG): container finished" podID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerID="bed61455847031621f05af83499945d1ce1af8215b5907c6b7ccfd15ae0ffbb8" exitCode=0 Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.422244 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.422331 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" event={"ID":"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef","Type":"ContainerDied","Data":"bed61455847031621f05af83499945d1ce1af8215b5907c6b7ccfd15ae0ffbb8"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.422361 4675 scope.go:117] "RemoveContainer" containerID="bed61455847031621f05af83499945d1ce1af8215b5907c6b7ccfd15ae0ffbb8" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.427401 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7322-account-create-hwqkt"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.432915 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" event={"ID":"dc02c331-c8e4-4c4a-864f-54fe5f391fad","Type":"ContainerStarted","Data":"1158e720ae0b8a45fec93ed17617cd8da1d509981abe220aae4cce3766036133"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.468983 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66d446568f-7dtvh" event={"ID":"4071e003-9808-4911-9cbc-5530d9400322","Type":"ContainerStarted","Data":"c5ef0dc26a955b33c2ebc493e739b05b3db5c09fc07ea22379b8108f19a6cc9c"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.479484 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rxzpl" event={"ID":"8ee67608-bfaa-407c-9256-488729244fe0","Type":"ContainerStarted","Data":"44de3fa9bcb75d8092ea5ce4c52b61fb01e0096a55ea253df053cb93d7fdb571"} Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.486891 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.486949 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.487037 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.487059 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvfp4\" (UniqueName: \"kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.487108 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.487142 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config\") pod \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\" (UID: \"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef\") " Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.490916 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7hcbf" podStartSLOduration=3.490893197 podStartE2EDuration="3.490893197s" podCreationTimestamp="2025-11-25 12:45:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:54.436054434 +0000 UTC m=+1099.607646805" watchObservedRunningTime="2025-11-25 12:45:54.490893197 +0000 UTC m=+1099.662485548" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.519197 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4" (OuterVolumeSpecName: "kube-api-access-gvfp4") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "kube-api-access-gvfp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.553633 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.591067 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvfp4\" (UniqueName: \"kubernetes.io/projected/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-kube-api-access-gvfp4\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.595148 4675 scope.go:117] "RemoveContainer" containerID="23a392802ed3ecd94db738cbf963af76d9ab90471c90a72fb467074c61bdc14f" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.596720 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78c5-account-create-64jfv"] Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.654070 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:45:54 crc kubenswrapper[4675]: W1125 12:45:54.735576 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe24ac53_f915_4062_8ca5_148425aa5f36.slice/crio-9ac8e5b19714ed1704a2a54091049a05f9b95bc4a26824d7682aa4c4fc2d6bdf WatchSource:0}: Error finding container 9ac8e5b19714ed1704a2a54091049a05f9b95bc4a26824d7682aa4c4fc2d6bdf: Status 404 returned error can't find the container with id 9ac8e5b19714ed1704a2a54091049a05f9b95bc4a26824d7682aa4c4fc2d6bdf Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.873257 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.898556 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.940645 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.953737 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.968250 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.990399 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:54 crc kubenswrapper[4675]: I1125 12:45:54.987955 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config" (OuterVolumeSpecName: "config") pod "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" (UID: "58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.004399 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.004429 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.004440 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.004447 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.105657 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.105717 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.105753 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.105894 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.106620 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnn2l\" (UniqueName: \"kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.106706 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0\") pod \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\" (UID: \"1eb4af57-d386-4309-bbf5-33704d4c5f4d\") " Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.127400 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l" (OuterVolumeSpecName: "kube-api-access-hnn2l") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: 
"1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "kube-api-access-hnn2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.142403 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: "1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.155876 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config" (OuterVolumeSpecName: "config") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: "1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.159417 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: "1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.165048 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: "1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.173691 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1eb4af57-d386-4309-bbf5-33704d4c5f4d" (UID: "1eb4af57-d386-4309-bbf5-33704d4c5f4d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212106 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212132 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnn2l\" (UniqueName: \"kubernetes.io/projected/1eb4af57-d386-4309-bbf5-33704d4c5f4d-kube-api-access-hnn2l\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212143 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212152 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212161 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.212169 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1eb4af57-d386-4309-bbf5-33704d4c5f4d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.568939 4675 generic.go:334] "Generic (PLEG): container finished" podID="2ac2c87f-984d-4561-a825-b3b25be4e078" containerID="29fb2159f2df74be823280e69570f59540cbf4b8e04474c75160d506ee7fdf2a" exitCode=0 Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.577827 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.578177 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7322-account-create-hwqkt" event={"ID":"2ac2c87f-984d-4561-a825-b3b25be4e078","Type":"ContainerDied","Data":"29fb2159f2df74be823280e69570f59540cbf4b8e04474c75160d506ee7fdf2a"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.578209 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.578226 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7322-account-create-hwqkt" event={"ID":"2ac2c87f-984d-4561-a825-b3b25be4e078","Type":"ContainerStarted","Data":"5be686961427e6117301e2a48a026cebde312e10de37201b055dfedb768b4566"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.578237 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-xcvr9" event={"ID":"58908a5d-b2bd-4f7d-9ef0-ef85a87096ef","Type":"ContainerDied","Data":"1ada2aa3ba143b26c2ae490c784f8996b3f524a60468a135a64f3d7789a280b2"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.611643 4675 generic.go:334] "Generic (PLEG): container finished" podID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerID="695bd9662390afb23583ebf9078df6151b906d2810a6acc606adb171c3e7d6df" exitCode=0 Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.611723 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" event={"ID":"dc02c331-c8e4-4c4a-864f-54fe5f391fad","Type":"ContainerDied","Data":"695bd9662390afb23583ebf9078df6151b906d2810a6acc606adb171c3e7d6df"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.630755 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.661117 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5-account-create-64jfv" event={"ID":"49a0347f-7b9b-461f-a5d7-1c803263ba15","Type":"ContainerStarted","Data":"84096ce7bb3e8bcde408c1f3c0f3976ae818bfe2a77aa53e5d10f8e2ddd60468"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.699871 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerStarted","Data":"ddc4bb266abd74d896c305cf69c03de2fb8718d9123ac35e6d837cc8f6ac023e"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.719043 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" event={"ID":"1eb4af57-d386-4309-bbf5-33704d4c5f4d","Type":"ContainerDied","Data":"f09f19b57c12ada13fe4f6ea5cc3b6d67c82fee89a7dfd10511d2f980f0a07f7"} Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.719092 4675 scope.go:117] "RemoveContainer" containerID="a1299627a88f7ad94055be7a2e06ca470568df91e46345aa65cf0c01464dc49e" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.719183 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-wtpzk" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.808152 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:45:55 crc kubenswrapper[4675]: E1125 12:45:55.809240 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="init" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.809255 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="init" Nov 25 12:45:55 crc kubenswrapper[4675]: E1125 12:45:55.809285 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="dnsmasq-dns" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.809293 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="dnsmasq-dns" Nov 25 12:45:55 crc kubenswrapper[4675]: E1125 12:45:55.809322 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eb4af57-d386-4309-bbf5-33704d4c5f4d" containerName="init" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.809328 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eb4af57-d386-4309-bbf5-33704d4c5f4d" containerName="init" Nov 25 12:45:55 crc kubenswrapper[4675]: I1125 12:45:55.810591 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" containerName="dnsmasq-dns" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.810651 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eb4af57-d386-4309-bbf5-33704d4c5f4d" containerName="init" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.815870 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerStarted","Data":"3a6aac522d7aade7b31a9237b9b9bbff8b07d2a043cca75c56b8ab27f9f2ab02"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.815982 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.835951 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.839628 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerStarted","Data":"9ac8e5b19714ed1704a2a54091049a05f9b95bc4a26824d7682aa4c4fc2d6bdf"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.858243 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld5wn" event={"ID":"eba420bb-9044-4e38-bcd8-11e51c903cac","Type":"ContainerStarted","Data":"a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.865048 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.873690 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.943443 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.943527 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.943571 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.943586 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h68mc\" (UniqueName: \"kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:55.943618 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.045323 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc 
kubenswrapper[4675]: I1125 12:45:56.045389 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.045418 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h68mc\" (UniqueName: \"kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.045457 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.045576 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.046802 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.047638 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.048920 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.054499 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.067730 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h68mc\" (UniqueName: \"kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc\") pod \"horizon-74dcc94bb9-qjv6x\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.115051 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-78c5-account-create-64jfv" podStartSLOduration=3.115007077 
podStartE2EDuration="3.115007077s" podCreationTimestamp="2025-11-25 12:45:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:56.107976201 +0000 UTC m=+1101.279568562" watchObservedRunningTime="2025-11-25 12:45:56.115007077 +0000 UTC m=+1101.286599438" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.430175 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.480732 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.505259 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-wtpzk"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.515001 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.523256 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-xcvr9"] Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.882172 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerStarted","Data":"d6b57109bed52766071b186bac6816d3e4b490a8088536efd4c7be58a286976f"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.888783 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" event={"ID":"dc02c331-c8e4-4c4a-864f-54fe5f391fad","Type":"ContainerStarted","Data":"77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.890603 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.902869 4675 generic.go:334] "Generic (PLEG): container finished" podID="49a0347f-7b9b-461f-a5d7-1c803263ba15" containerID="d626183bde11ee3ed62ca10ecae3dc77a88bd27c1c257686dc9e439937c521fd" exitCode=0 Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.903022 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5-account-create-64jfv" event={"ID":"49a0347f-7b9b-461f-a5d7-1c803263ba15","Type":"ContainerDied","Data":"d626183bde11ee3ed62ca10ecae3dc77a88bd27c1c257686dc9e439937c521fd"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.910844 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerStarted","Data":"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5"} Nov 25 12:45:56 crc kubenswrapper[4675]: I1125 12:45:56.912677 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" podStartSLOduration=4.912616454 podStartE2EDuration="4.912616454s" podCreationTimestamp="2025-11-25 12:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:56.90973573 +0000 UTC m=+1102.081328081" watchObservedRunningTime="2025-11-25 12:45:56.912616454 +0000 UTC m=+1102.084208795" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.169497 4675 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.563944 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.570334 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eb4af57-d386-4309-bbf5-33704d4c5f4d" path="/var/lib/kubelet/pods/1eb4af57-d386-4309-bbf5-33704d4c5f4d/volumes" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.570926 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58908a5d-b2bd-4f7d-9ef0-ef85a87096ef" path="/var/lib/kubelet/pods/58908a5d-b2bd-4f7d-9ef0-ef85a87096ef/volumes" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.593297 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5bgb\" (UniqueName: \"kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb\") pod \"2ac2c87f-984d-4561-a825-b3b25be4e078\" (UID: \"2ac2c87f-984d-4561-a825-b3b25be4e078\") " Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.599007 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb" (OuterVolumeSpecName: "kube-api-access-v5bgb") pod "2ac2c87f-984d-4561-a825-b3b25be4e078" (UID: "2ac2c87f-984d-4561-a825-b3b25be4e078"). InnerVolumeSpecName "kube-api-access-v5bgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.694626 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5bgb\" (UniqueName: \"kubernetes.io/projected/2ac2c87f-984d-4561-a825-b3b25be4e078-kube-api-access-v5bgb\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.935655 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerStarted","Data":"760ddb3730823d40aad88c9f539385f53831676cde803a4c68f76f024941c47a"} Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.936120 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-log" containerID="cri-o://d6b57109bed52766071b186bac6816d3e4b490a8088536efd4c7be58a286976f" gracePeriod=30 Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.938634 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-httpd" containerID="cri-o://760ddb3730823d40aad88c9f539385f53831676cde803a4c68f76f024941c47a" gracePeriod=30 Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.945704 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerStarted","Data":"65acf609d4588c8b4dfbe47b148668cc50cfe409f353f500cb7aadf697f079cb"} Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.958755 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-log" 
containerID="cri-o://53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" gracePeriod=30 Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.958850 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerStarted","Data":"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099"} Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.958900 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-httpd" containerID="cri-o://c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" gracePeriod=30 Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.979700 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.979677008 podStartE2EDuration="5.979677008s" podCreationTimestamp="2025-11-25 12:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:57.965169667 +0000 UTC m=+1103.136762018" watchObservedRunningTime="2025-11-25 12:45:57.979677008 +0000 UTC m=+1103.151269349" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.991345 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7322-account-create-hwqkt" Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.991555 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7322-account-create-hwqkt" event={"ID":"2ac2c87f-984d-4561-a825-b3b25be4e078","Type":"ContainerDied","Data":"5be686961427e6117301e2a48a026cebde312e10de37201b055dfedb768b4566"} Nov 25 12:45:57 crc kubenswrapper[4675]: I1125 12:45:57.991610 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5be686961427e6117301e2a48a026cebde312e10de37201b055dfedb768b4566" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.028404 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.028388619 podStartE2EDuration="6.028388619s" podCreationTimestamp="2025-11-25 12:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:45:58.006647354 +0000 UTC m=+1103.178239705" watchObservedRunningTime="2025-11-25 12:45:58.028388619 +0000 UTC m=+1103.199980970" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.465892 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.524547 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckzgd\" (UniqueName: \"kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd\") pod \"49a0347f-7b9b-461f-a5d7-1c803263ba15\" (UID: \"49a0347f-7b9b-461f-a5d7-1c803263ba15\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.542749 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd" (OuterVolumeSpecName: "kube-api-access-ckzgd") pod "49a0347f-7b9b-461f-a5d7-1c803263ba15" (UID: "49a0347f-7b9b-461f-a5d7-1c803263ba15"). InnerVolumeSpecName "kube-api-access-ckzgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.630166 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckzgd\" (UniqueName: \"kubernetes.io/projected/49a0347f-7b9b-461f-a5d7-1c803263ba15-kube-api-access-ckzgd\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.658932 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731438 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731533 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731565 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpqnh\" (UniqueName: \"kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731808 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731882 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.731944 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.732062 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.732105 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts\") pod \"56eedb12-32aa-47aa-a899-ffecbe3447bd\" (UID: \"56eedb12-32aa-47aa-a899-ffecbe3447bd\") " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.734461 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.737273 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh" (OuterVolumeSpecName: "kube-api-access-mpqnh") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "kube-api-access-mpqnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.738027 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs" (OuterVolumeSpecName: "logs") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.739628 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.742634 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts" (OuterVolumeSpecName: "scripts") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.780792 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.789034 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.796100 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data" (OuterVolumeSpecName: "config-data") pod "56eedb12-32aa-47aa-a899-ffecbe3447bd" (UID: "56eedb12-32aa-47aa-a899-ffecbe3447bd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834490 4675 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834535 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834547 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834559 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834568 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eedb12-32aa-47aa-a899-ffecbe3447bd-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834579 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56eedb12-32aa-47aa-a899-ffecbe3447bd-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834606 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.834619 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpqnh\" (UniqueName: \"kubernetes.io/projected/56eedb12-32aa-47aa-a899-ffecbe3447bd-kube-api-access-mpqnh\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.858383 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 12:45:58 crc kubenswrapper[4675]: I1125 12:45:58.937541 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.007527 4675 generic.go:334] "Generic (PLEG): container finished" podID="be24ac53-f915-4062-8ca5-148425aa5f36" containerID="760ddb3730823d40aad88c9f539385f53831676cde803a4c68f76f024941c47a" exitCode=143 Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.007564 4675 generic.go:334] "Generic (PLEG): container finished" podID="be24ac53-f915-4062-8ca5-148425aa5f36" containerID="d6b57109bed52766071b186bac6816d3e4b490a8088536efd4c7be58a286976f" 
exitCode=143 Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.007602 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerDied","Data":"760ddb3730823d40aad88c9f539385f53831676cde803a4c68f76f024941c47a"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.007649 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerDied","Data":"d6b57109bed52766071b186bac6816d3e4b490a8088536efd4c7be58a286976f"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.012382 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78c5-account-create-64jfv" event={"ID":"49a0347f-7b9b-461f-a5d7-1c803263ba15","Type":"ContainerDied","Data":"84096ce7bb3e8bcde408c1f3c0f3976ae818bfe2a77aa53e5d10f8e2ddd60468"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.012419 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84096ce7bb3e8bcde408c1f3c0f3976ae818bfe2a77aa53e5d10f8e2ddd60468" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.012476 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78c5-account-create-64jfv" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.032681 4675 generic.go:334] "Generic (PLEG): container finished" podID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerID="c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" exitCode=143 Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.032721 4675 generic.go:334] "Generic (PLEG): container finished" podID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerID="53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" exitCode=143 Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.032988 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.033651 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerDied","Data":"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.033690 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerDied","Data":"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.033706 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"56eedb12-32aa-47aa-a899-ffecbe3447bd","Type":"ContainerDied","Data":"ddc4bb266abd74d896c305cf69c03de2fb8718d9123ac35e6d837cc8f6ac023e"} Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.033735 4675 scope.go:117] "RemoveContainer" containerID="c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.132179 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.141353 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.181585 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.182254 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac2c87f-984d-4561-a825-b3b25be4e078" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182272 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac2c87f-984d-4561-a825-b3b25be4e078" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.182287 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49a0347f-7b9b-461f-a5d7-1c803263ba15" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182293 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="49a0347f-7b9b-461f-a5d7-1c803263ba15" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.182311 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-httpd" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182318 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-httpd" Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.182324 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-log" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182329 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-log" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182685 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac2c87f-984d-4561-a825-b3b25be4e078" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182699 
4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-log" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182707 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" containerName="glance-httpd" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.182725 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="49a0347f-7b9b-461f-a5d7-1c803263ba15" containerName="mariadb-account-create" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.184898 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.188191 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.188206 4675 scope.go:117] "RemoveContainer" containerID="53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.188366 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.199830 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.286036 4675 scope.go:117] "RemoveContainer" containerID="c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.287460 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099\": container with ID starting with c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099 not found: ID does not exist" containerID="c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.287539 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099"} err="failed to get container status \"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099\": rpc error: code = NotFound desc = could not find container \"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099\": container with ID starting with c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099 not found: ID does not exist" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.287564 4675 scope.go:117] "RemoveContainer" containerID="53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" Nov 25 12:45:59 crc kubenswrapper[4675]: E1125 12:45:59.287973 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5\": container with ID starting with 53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5 not found: ID does not exist" containerID="53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.287998 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5"} err="failed 
to get container status \"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5\": rpc error: code = NotFound desc = could not find container \"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5\": container with ID starting with 53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5 not found: ID does not exist" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.288011 4675 scope.go:117] "RemoveContainer" containerID="c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.291935 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099"} err="failed to get container status \"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099\": rpc error: code = NotFound desc = could not find container \"c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099\": container with ID starting with c60bd7e8ce344c9caac739dff9bcbddaa80ce5aa63b494e20a0858ef77e17099 not found: ID does not exist" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.291969 4675 scope.go:117] "RemoveContainer" containerID="53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.303995 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5"} err="failed to get container status \"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5\": rpc error: code = NotFound desc = could not find container \"53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5\": container with ID starting with 53603d5110102c958157a036c29de23167cd47404b368489b2975f99946a2ae5 not found: ID does not exist" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345035 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345195 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345261 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345385 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 
12:45:59.345401 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345453 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345555 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.345596 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wmpn\" (UniqueName: \"kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.449862 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.449941 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450012 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450031 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450065 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450134 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450167 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wmpn\" (UniqueName: \"kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.450194 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.451536 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.451583 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.451904 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.458567 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.470280 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.474318 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.530656 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wmpn\" (UniqueName: 
\"kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.531217 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.554027 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.570321 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:45:59 crc kubenswrapper[4675]: I1125 12:45:59.590953 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56eedb12-32aa-47aa-a899-ffecbe3447bd" path="/var/lib/kubelet/pods/56eedb12-32aa-47aa-a899-ffecbe3447bd/volumes" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.050982 4675 generic.go:334] "Generic (PLEG): container finished" podID="8e7accff-b51a-446a-adbc-26c5fcd8263d" containerID="57348e83ac2f41ec1acdd7c66c68f0633c636d052d1dc0604ad3155f4af05fda" exitCode=0 Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.051057 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7hcbf" event={"ID":"8e7accff-b51a-446a-adbc-26c5fcd8263d","Type":"ContainerDied","Data":"57348e83ac2f41ec1acdd7c66c68f0633c636d052d1dc0604ad3155f4af05fda"} Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.297170 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.850947 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-756fb5694f-vqml4"] Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.893103 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.906478 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.913908 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.929943 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.984003 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.990912 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991026 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991083 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991104 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sbwp\" (UniqueName: \"kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991126 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991152 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:00 crc kubenswrapper[4675]: I1125 12:46:00.991234 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.002694 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 
12:46:01.026742 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85d4f84f96-fcncp"] Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.028172 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.052702 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85d4f84f96-fcncp"] Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092773 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092855 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092873 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sbwp\" (UniqueName: \"kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092890 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092910 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092965 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.092993 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.095354 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.095833 4675 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.096006 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.109027 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.114270 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.122285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sbwp\" (UniqueName: \"kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.120274 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs\") pod \"horizon-6df5497f4d-4g9tv\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") " pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.194753 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d2040-4c83-4443-989e-cc844466e840-logs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.194926 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-combined-ca-bundle\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.194949 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-secret-key\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.194971 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-scripts\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.195009 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-tls-certs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.195055 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-config-data\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.195091 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9nch\" (UniqueName: \"kubernetes.io/projected/412d2040-4c83-4443-989e-cc844466e840-kube-api-access-g9nch\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.252769 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297068 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d2040-4c83-4443-989e-cc844466e840-logs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297156 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-combined-ca-bundle\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297183 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-secret-key\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297210 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-scripts\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297245 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-tls-certs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297283 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-config-data\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297336 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9nch\" (UniqueName: \"kubernetes.io/projected/412d2040-4c83-4443-989e-cc844466e840-kube-api-access-g9nch\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.297627 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d2040-4c83-4443-989e-cc844466e840-logs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.298379 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-scripts\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.299570 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/412d2040-4c83-4443-989e-cc844466e840-config-data\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.303869 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-combined-ca-bundle\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.307906 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-tls-certs\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.323537 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/412d2040-4c83-4443-989e-cc844466e840-horizon-secret-key\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.337433 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9nch\" (UniqueName: \"kubernetes.io/projected/412d2040-4c83-4443-989e-cc844466e840-kube-api-access-g9nch\") pod \"horizon-85d4f84f96-fcncp\" (UID: \"412d2040-4c83-4443-989e-cc844466e840\") " pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:01 crc kubenswrapper[4675]: I1125 12:46:01.373651 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:02 crc kubenswrapper[4675]: I1125 12:46:02.908981 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:46:02 crc kubenswrapper[4675]: I1125 12:46:02.981197 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:46:02 crc kubenswrapper[4675]: I1125 12:46:02.981445 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-k6ghf" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" containerID="cri-o://9615d7abe8242e9b994843e36026f9b5b6f26f3d26426c8e9229972f1d55c0db" gracePeriod=10 Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.259512 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-k6ghf" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.434033 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-mj5tw"] Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.435310 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.437547 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8gfpq" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.437914 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.465846 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-mj5tw"] Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.544712 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgt9g\" (UniqueName: \"kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.544988 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.545102 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.556993 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rhqss"] Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.558191 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.560544 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-b6rw9" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.562029 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rhqss"] Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.570483 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.570904 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646692 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l76j6\" (UniqueName: \"kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646781 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646824 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgt9g\" (UniqueName: \"kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646841 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646884 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.646925 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.654598 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.669737 4675 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.671075 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgt9g\" (UniqueName: \"kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g\") pod \"barbican-db-sync-mj5tw\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.748364 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.748449 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l76j6\" (UniqueName: \"kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.748504 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.755531 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.762973 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.766452 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l76j6\" (UniqueName: \"kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6\") pod \"neutron-db-sync-rhqss\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.774387 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:03 crc kubenswrapper[4675]: W1125 12:46:03.857840 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50bd5686_96db_425f_a4c9_10e84703f8fe.slice/crio-2c8bdb86d5ad1f65bbe443063115202b34f6790dce64403706e6f2f77344d0be WatchSource:0}: Error finding container 2c8bdb86d5ad1f65bbe443063115202b34f6790dce64403706e6f2f77344d0be: Status 404 returned error can't find the container with id 2c8bdb86d5ad1f65bbe443063115202b34f6790dce64403706e6f2f77344d0be Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.889068 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqss" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.923107 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951306 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951498 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951563 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951647 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58dcl\" (UniqueName: \"kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951806 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951926 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.951955 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.952152 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"be24ac53-f915-4062-8ca5-148425aa5f36\" (UID: \"be24ac53-f915-4062-8ca5-148425aa5f36\") " Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.960074 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs" (OuterVolumeSpecName: "logs") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.964969 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.966694 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.968837 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl" (OuterVolumeSpecName: "kube-api-access-58dcl") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "kube-api-access-58dcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.979369 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts" (OuterVolumeSpecName: "scripts") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:03 crc kubenswrapper[4675]: I1125 12:46:03.981141 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.030403 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.055722 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.055851 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.055945 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x5ms\" (UniqueName: \"kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056005 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056062 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056110 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys\") pod \"8e7accff-b51a-446a-adbc-26c5fcd8263d\" (UID: \"8e7accff-b51a-446a-adbc-26c5fcd8263d\") " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056463 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58dcl\" (UniqueName: \"kubernetes.io/projected/be24ac53-f915-4062-8ca5-148425aa5f36-kube-api-access-58dcl\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056479 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056498 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056507 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056516 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be24ac53-f915-4062-8ca5-148425aa5f36-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.056524 4675 
reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.082735 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms" (OuterVolumeSpecName: "kube-api-access-8x5ms") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "kube-api-access-8x5ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.087408 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.094229 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.094691 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts" (OuterVolumeSpecName: "scripts") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.100296 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.107180 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.109404 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.118553 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data" (OuterVolumeSpecName: "config-data") pod "be24ac53-f915-4062-8ca5-148425aa5f36" (UID: "be24ac53-f915-4062-8ca5-148425aa5f36"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.136232 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerStarted","Data":"2c8bdb86d5ad1f65bbe443063115202b34f6790dce64403706e6f2f77344d0be"} Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.139333 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be24ac53-f915-4062-8ca5-148425aa5f36","Type":"ContainerDied","Data":"9ac8e5b19714ed1704a2a54091049a05f9b95bc4a26824d7682aa4c4fc2d6bdf"} Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.139400 4675 scope.go:117] "RemoveContainer" containerID="760ddb3730823d40aad88c9f539385f53831676cde803a4c68f76f024941c47a" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.139521 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.142336 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data" (OuterVolumeSpecName: "config-data") pod "8e7accff-b51a-446a-adbc-26c5fcd8263d" (UID: "8e7accff-b51a-446a-adbc-26c5fcd8263d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.144777 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7hcbf" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.145106 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7hcbf" event={"ID":"8e7accff-b51a-446a-adbc-26c5fcd8263d","Type":"ContainerDied","Data":"9a89acbac25202b328ac8741e475de992786fae531688a19b25825c826e531b2"} Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.145142 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a89acbac25202b328ac8741e475de992786fae531688a19b25825c826e531b2" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158313 4675 generic.go:334] "Generic (PLEG): container finished" podID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerID="9615d7abe8242e9b994843e36026f9b5b6f26f3d26426c8e9229972f1d55c0db" exitCode=0 Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158362 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-k6ghf" event={"ID":"bce12a31-015a-4f94-9d41-46ad86692cc0","Type":"ContainerDied","Data":"9615d7abe8242e9b994843e36026f9b5b6f26f3d26426c8e9229972f1d55c0db"} Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158921 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158956 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158967 4675 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-internal-tls-certs\") on node \"crc\" 
DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158976 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x5ms\" (UniqueName: \"kubernetes.io/projected/8e7accff-b51a-446a-adbc-26c5fcd8263d-kube-api-access-8x5ms\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158984 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be24ac53-f915-4062-8ca5-148425aa5f36-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.158993 4675 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.159002 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.159009 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.159018 4675 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8e7accff-b51a-446a-adbc-26c5fcd8263d-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.242265 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.259071 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.285338 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:46:04 crc kubenswrapper[4675]: E1125 12:46:04.285957 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-log" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.285975 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-log" Nov 25 12:46:04 crc kubenswrapper[4675]: E1125 12:46:04.285995 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-httpd" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.286003 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-httpd" Nov 25 12:46:04 crc kubenswrapper[4675]: E1125 12:46:04.286018 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e7accff-b51a-446a-adbc-26c5fcd8263d" containerName="keystone-bootstrap" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.286027 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e7accff-b51a-446a-adbc-26c5fcd8263d" containerName="keystone-bootstrap" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.286243 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-log" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.286495 4675 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" containerName="glance-httpd" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.286517 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e7accff-b51a-446a-adbc-26c5fcd8263d" containerName="keystone-bootstrap" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.287682 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.295959 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.296135 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.297495 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.361493 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.361663 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362123 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362192 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqvj7\" (UniqueName: \"kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362216 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362288 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362350 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.362373 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464138 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464177 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464203 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464376 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464465 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464492 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqvj7\" (UniqueName: \"kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464509 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464553 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.464588 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.468555 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.469104 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.470665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.472545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.482019 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.482108 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.485484 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqvj7\" (UniqueName: \"kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7\") pod \"glance-default-internal-api-0\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.517125 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:46:04 crc kubenswrapper[4675]: I1125 12:46:04.631369 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.079568 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7hcbf"] Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.100339 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7hcbf"] Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.162873 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-slqsb"] Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.164732 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.168027 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.168196 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.168299 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-69vb7" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.168354 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.177930 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.178021 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw4sj\" (UniqueName: \"kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.178196 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.178277 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.178325 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " 
pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.178344 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.181134 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-slqsb"] Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.279784 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.279844 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.279951 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.279974 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw4sj\" (UniqueName: \"kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.279999 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.280016 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.284289 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.284357 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " 
pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.288325 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.294733 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.302218 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.307669 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw4sj\" (UniqueName: \"kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj\") pod \"keystone-bootstrap-slqsb\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.483083 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.569692 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e7accff-b51a-446a-adbc-26c5fcd8263d" path="/var/lib/kubelet/pods/8e7accff-b51a-446a-adbc-26c5fcd8263d/volumes" Nov 25 12:46:05 crc kubenswrapper[4675]: I1125 12:46:05.570467 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be24ac53-f915-4062-8ca5-148425aa5f36" path="/var/lib/kubelet/pods/be24ac53-f915-4062-8ca5-148425aa5f36/volumes" Nov 25 12:46:12 crc kubenswrapper[4675]: E1125 12:46:12.208444 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 12:46:12 crc kubenswrapper[4675]: E1125 12:46:12.209092 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56fh589h5b8h569h55fhb7h557hb6h568h95h57ch675h5ddhf4hdfh55fh679h595h55ch697hcchd6h578h86h5c4hc7h8dh5f6h7chc8h5b4h58cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nkczl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-66d446568f-7dtvh_openstack(4071e003-9808-4911-9cbc-5530d9400322): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:46:12 crc kubenswrapper[4675]: E1125 12:46:12.211063 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-66d446568f-7dtvh" podUID="4071e003-9808-4911-9cbc-5530d9400322" Nov 25 12:46:13 crc kubenswrapper[4675]: I1125 12:46:13.259865 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-k6ghf" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Nov 25 12:46:13 crc kubenswrapper[4675]: I1125 12:46:13.662130 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:46:13 crc kubenswrapper[4675]: I1125 12:46:13.662636 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:46:14 crc 
kubenswrapper[4675]: E1125 12:46:14.839539 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 12:46:14 crc kubenswrapper[4675]: E1125 12:46:14.839792 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n547h584h5c9h89hc4h65fh565h96h57h7dhbbh658h85h65bh98h544hc5h56h5cfh598h5cchb7h56dh5d4h66bh5b8h55ch85h99hdfh59dhd5q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dxv5v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-756fb5694f-vqml4_openstack(32e0bbdb-534b-444c-850b-fb9257b211d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:46:14 crc kubenswrapper[4675]: E1125 12:46:14.842920 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-756fb5694f-vqml4" podUID="32e0bbdb-534b-444c-850b-fb9257b211d7" Nov 25 12:46:14 crc kubenswrapper[4675]: I1125 12:46:14.939987 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.075110 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb\") pod \"bce12a31-015a-4f94-9d41-46ad86692cc0\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.075216 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4qvf\" (UniqueName: \"kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf\") pod \"bce12a31-015a-4f94-9d41-46ad86692cc0\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.075269 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config\") pod \"bce12a31-015a-4f94-9d41-46ad86692cc0\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.075356 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc\") pod \"bce12a31-015a-4f94-9d41-46ad86692cc0\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.075464 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb\") pod \"bce12a31-015a-4f94-9d41-46ad86692cc0\" (UID: \"bce12a31-015a-4f94-9d41-46ad86692cc0\") " Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.081670 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf" (OuterVolumeSpecName: "kube-api-access-x4qvf") pod "bce12a31-015a-4f94-9d41-46ad86692cc0" (UID: "bce12a31-015a-4f94-9d41-46ad86692cc0"). InnerVolumeSpecName "kube-api-access-x4qvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.138846 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bce12a31-015a-4f94-9d41-46ad86692cc0" (UID: "bce12a31-015a-4f94-9d41-46ad86692cc0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.139045 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bce12a31-015a-4f94-9d41-46ad86692cc0" (UID: "bce12a31-015a-4f94-9d41-46ad86692cc0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.157314 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bce12a31-015a-4f94-9d41-46ad86692cc0" (UID: "bce12a31-015a-4f94-9d41-46ad86692cc0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.157312 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config" (OuterVolumeSpecName: "config") pod "bce12a31-015a-4f94-9d41-46ad86692cc0" (UID: "bce12a31-015a-4f94-9d41-46ad86692cc0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.178031 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.178073 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.178088 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4qvf\" (UniqueName: \"kubernetes.io/projected/bce12a31-015a-4f94-9d41-46ad86692cc0-kube-api-access-x4qvf\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.178101 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.178112 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bce12a31-015a-4f94-9d41-46ad86692cc0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.286078 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-k6ghf" Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.291955 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-k6ghf" event={"ID":"bce12a31-015a-4f94-9d41-46ad86692cc0","Type":"ContainerDied","Data":"5ab70f57b41994cf6903e97913e49aff3b156c2099300da6a08b8b3b34cd8e4f"} Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.341299 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.349536 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-k6ghf"] Nov 25 12:46:15 crc kubenswrapper[4675]: I1125 12:46:15.554795 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" path="/var/lib/kubelet/pods/bce12a31-015a-4f94-9d41-46ad86692cc0/volumes" Nov 25 12:46:18 crc kubenswrapper[4675]: I1125 12:46:18.260219 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-k6ghf" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.009906 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.158220 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs\") pod \"4071e003-9808-4911-9cbc-5530d9400322\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.158483 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkczl\" (UniqueName: \"kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl\") pod \"4071e003-9808-4911-9cbc-5530d9400322\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.158591 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key\") pod \"4071e003-9808-4911-9cbc-5530d9400322\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.158619 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-config-data\") pod \"4071e003-9808-4911-9cbc-5530d9400322\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.158640 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts\") pod \"4071e003-9808-4911-9cbc-5530d9400322\" (UID: \"4071e003-9808-4911-9cbc-5530d9400322\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.159506 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts" (OuterVolumeSpecName: "scripts") pod "4071e003-9808-4911-9cbc-5530d9400322" (UID: "4071e003-9808-4911-9cbc-5530d9400322"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.160059 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs" (OuterVolumeSpecName: "logs") pod "4071e003-9808-4911-9cbc-5530d9400322" (UID: "4071e003-9808-4911-9cbc-5530d9400322"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.160522 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-config-data" (OuterVolumeSpecName: "config-data") pod "4071e003-9808-4911-9cbc-5530d9400322" (UID: "4071e003-9808-4911-9cbc-5530d9400322"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.168440 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl" (OuterVolumeSpecName: "kube-api-access-nkczl") pod "4071e003-9808-4911-9cbc-5530d9400322" (UID: "4071e003-9808-4911-9cbc-5530d9400322"). InnerVolumeSpecName "kube-api-access-nkczl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.171763 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4071e003-9808-4911-9cbc-5530d9400322" (UID: "4071e003-9808-4911-9cbc-5530d9400322"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.260267 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkczl\" (UniqueName: \"kubernetes.io/projected/4071e003-9808-4911-9cbc-5530d9400322-kube-api-access-nkczl\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.260298 4675 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4071e003-9808-4911-9cbc-5530d9400322-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.260307 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.260315 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4071e003-9808-4911-9cbc-5530d9400322-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.260325 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4071e003-9808-4911-9cbc-5530d9400322-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.366053 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66d446568f-7dtvh" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.367911 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66d446568f-7dtvh" event={"ID":"4071e003-9808-4911-9cbc-5530d9400322","Type":"ContainerDied","Data":"c5ef0dc26a955b33c2ebc493e739b05b3db5c09fc07ea22379b8108f19a6cc9c"} Nov 25 12:46:25 crc kubenswrapper[4675]: E1125 12:46:25.381246 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 12:46:25 crc kubenswrapper[4675]: E1125 12:46:25.381485 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n59fh56ch665h9bh566h64dhf6h89h66bh556h9ch55fhffh56h55h678h669h588hd4h577h685h9ch67ch5c6h58bh668h576h554hd7h65fh584h8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-98q44,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c38138b1-41d4-4e17-94a8-c1a66a3725c3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.429623 4675 scope.go:117] "RemoveContainer" containerID="d6b57109bed52766071b186bac6816d3e4b490a8088536efd4c7be58a286976f" Nov 25 12:46:25 crc 
kubenswrapper[4675]: I1125 12:46:25.464146 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.471240 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.480919 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-66d446568f-7dtvh"] Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.556846 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4071e003-9808-4911-9cbc-5530d9400322" path="/var/lib/kubelet/pods/4071e003-9808-4911-9cbc-5530d9400322/volumes" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665212 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts\") pod \"32e0bbdb-534b-444c-850b-fb9257b211d7\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665376 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs\") pod \"32e0bbdb-534b-444c-850b-fb9257b211d7\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665403 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key\") pod \"32e0bbdb-534b-444c-850b-fb9257b211d7\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665438 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxv5v\" (UniqueName: \"kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v\") pod \"32e0bbdb-534b-444c-850b-fb9257b211d7\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665472 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data\") pod \"32e0bbdb-534b-444c-850b-fb9257b211d7\" (UID: \"32e0bbdb-534b-444c-850b-fb9257b211d7\") " Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.665802 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs" (OuterVolumeSpecName: "logs") pod "32e0bbdb-534b-444c-850b-fb9257b211d7" (UID: "32e0bbdb-534b-444c-850b-fb9257b211d7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.666220 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32e0bbdb-534b-444c-850b-fb9257b211d7-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.666832 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data" (OuterVolumeSpecName: "config-data") pod "32e0bbdb-534b-444c-850b-fb9257b211d7" (UID: "32e0bbdb-534b-444c-850b-fb9257b211d7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.667001 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts" (OuterVolumeSpecName: "scripts") pod "32e0bbdb-534b-444c-850b-fb9257b211d7" (UID: "32e0bbdb-534b-444c-850b-fb9257b211d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.669380 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v" (OuterVolumeSpecName: "kube-api-access-dxv5v") pod "32e0bbdb-534b-444c-850b-fb9257b211d7" (UID: "32e0bbdb-534b-444c-850b-fb9257b211d7"). InnerVolumeSpecName "kube-api-access-dxv5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.670011 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "32e0bbdb-534b-444c-850b-fb9257b211d7" (UID: "32e0bbdb-534b-444c-850b-fb9257b211d7"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.767736 4675 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/32e0bbdb-534b-444c-850b-fb9257b211d7-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.767774 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxv5v\" (UniqueName: \"kubernetes.io/projected/32e0bbdb-534b-444c-850b-fb9257b211d7-kube-api-access-dxv5v\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.767789 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.767801 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32e0bbdb-534b-444c-850b-fb9257b211d7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:25 crc kubenswrapper[4675]: I1125 12:46:25.866665 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85d4f84f96-fcncp"] Nov 25 12:46:26 crc kubenswrapper[4675]: I1125 12:46:26.271721 4675 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod58908a5d-b2bd-4f7d-9ef0-ef85a87096ef"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod58908a5d-b2bd-4f7d-9ef0-ef85a87096ef] : Timed out while waiting for systemd to remove kubepods-besteffort-pod58908a5d_b2bd_4f7d_9ef0_ef85a87096ef.slice" Nov 25 12:46:26 crc kubenswrapper[4675]: I1125 12:46:26.381801 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-756fb5694f-vqml4" event={"ID":"32e0bbdb-534b-444c-850b-fb9257b211d7","Type":"ContainerDied","Data":"10e00ccb1a0a2371e1fc553b64ce3d00ca03eae208a28d5d8d736b51b4201927"} Nov 25 12:46:26 crc kubenswrapper[4675]: I1125 12:46:26.382113 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-756fb5694f-vqml4" Nov 25 12:46:26 crc kubenswrapper[4675]: I1125 12:46:26.457272 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-756fb5694f-vqml4"] Nov 25 12:46:26 crc kubenswrapper[4675]: I1125 12:46:26.463985 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-756fb5694f-vqml4"] Nov 25 12:46:26 crc kubenswrapper[4675]: W1125 12:46:26.961207 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod412d2040_4c83_4443_989e_cc844466e840.slice/crio-4f38a50187af01aa60c05ade03ea3f950c6a3f8e0fc22161574406c314cbe69d WatchSource:0}: Error finding container 4f38a50187af01aa60c05ade03ea3f950c6a3f8e0fc22161574406c314cbe69d: Status 404 returned error can't find the container with id 4f38a50187af01aa60c05ade03ea3f950c6a3f8e0fc22161574406c314cbe69d Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:26.999553 4675 scope.go:117] "RemoveContainer" containerID="9615d7abe8242e9b994843e36026f9b5b6f26f3d26426c8e9229972f1d55c0db" Nov 25 12:46:27 crc kubenswrapper[4675]: E1125 12:46:27.088142 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 12:46:27 crc kubenswrapper[4675]: E1125 12:46:27.088289 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j8sgr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:ni
l,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-rxzpl_openstack(8ee67608-bfaa-407c-9256-488729244fe0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:46:27 crc kubenswrapper[4675]: E1125 12:46:27.089611 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-rxzpl" podUID="8ee67608-bfaa-407c-9256-488729244fe0" Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.247806 4675 scope.go:117] "RemoveContainer" containerID="b56454f9271d101890817656e721c10cb4bc123722e66e86adc8ee42ad668e2e" Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.406611 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerStarted","Data":"a5cb3d8c30412353eaad43f0a038ca9def4fda7f21d726c08a7f6e5da222c0f2"} Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.406647 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerStarted","Data":"4f38a50187af01aa60c05ade03ea3f950c6a3f8e0fc22161574406c314cbe69d"} Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.414764 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld5wn" event={"ID":"eba420bb-9044-4e38-bcd8-11e51c903cac","Type":"ContainerStarted","Data":"5616dc35c507a1d4da0cb3cfce369d6221b6fbf4c34065e4717fc0016d763fae"} Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.420902 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerStarted","Data":"bf7b9ccab814019368852cc632af2e53a0993b3b35e417cb3ea473d01a2b8bfe"} Nov 25 12:46:27 crc kubenswrapper[4675]: E1125 12:46:27.424496 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-rxzpl" podUID="8ee67608-bfaa-407c-9256-488729244fe0" Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.438226 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ld5wn" podStartSLOduration=4.278221142 podStartE2EDuration="35.438209655s" podCreationTimestamp="2025-11-25 12:45:52 +0000 UTC" firstStartedPulling="2025-11-25 12:45:54.203576655 +0000 UTC m=+1099.375168996" lastFinishedPulling="2025-11-25 12:46:25.363565168 +0000 UTC m=+1130.535157509" observedRunningTime="2025-11-25 12:46:27.430530166 +0000 UTC m=+1132.602122537" watchObservedRunningTime="2025-11-25 12:46:27.438209655 +0000 UTC m=+1132.609801996" Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.477363 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.544928 4675 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="32e0bbdb-534b-444c-850b-fb9257b211d7" path="/var/lib/kubelet/pods/32e0bbdb-534b-444c-850b-fb9257b211d7/volumes" Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.655997 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-slqsb"] Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.665504 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-mj5tw"] Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.680779 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rhqss"] Nov 25 12:46:27 crc kubenswrapper[4675]: W1125 12:46:27.686575 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92b6601f_6d94_4208_843b_a0fe1aac75ed.slice/crio-e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b WatchSource:0}: Error finding container e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b: Status 404 returned error can't find the container with id e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b Nov 25 12:46:27 crc kubenswrapper[4675]: W1125 12:46:27.688282 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ddf9f0c_7045_4afe_945e_6c6f04f3e699.slice/crio-67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63 WatchSource:0}: Error finding container 67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63: Status 404 returned error can't find the container with id 67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63 Nov 25 12:46:27 crc kubenswrapper[4675]: W1125 12:46:27.690239 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc05847eb_7376_4c25_96e6_9218fa514493.slice/crio-5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd WatchSource:0}: Error finding container 5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd: Status 404 returned error can't find the container with id 5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd Nov 25 12:46:27 crc kubenswrapper[4675]: I1125 12:46:27.883877 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.448001 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerStarted","Data":"b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.517398 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqss" event={"ID":"c05847eb-7376-4c25-96e6-9218fa514493","Type":"ContainerStarted","Data":"0b663ec7c4f5e1fe8005bb05b1f4b514f633f609b2e481aa9cc021138088547d"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.517688 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqss" event={"ID":"c05847eb-7376-4c25-96e6-9218fa514493","Type":"ContainerStarted","Data":"5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.544495 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-85d4f84f96-fcncp" 
podStartSLOduration=28.544470562 podStartE2EDuration="28.544470562s" podCreationTimestamp="2025-11-25 12:46:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:28.519299905 +0000 UTC m=+1133.690892246" watchObservedRunningTime="2025-11-25 12:46:28.544470562 +0000 UTC m=+1133.716062903" Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.549410 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-slqsb" event={"ID":"3ddf9f0c-7045-4afe-945e-6c6f04f3e699","Type":"ContainerStarted","Data":"aba56ca1748ab9c460cc6b34d2d76f9c5f1cb56802ac688e8e0672d2a9b6ccbc"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.549462 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-slqsb" event={"ID":"3ddf9f0c-7045-4afe-945e-6c6f04f3e699","Type":"ContainerStarted","Data":"67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.574629 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-74dcc94bb9-qjv6x" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon-log" containerID="cri-o://bf7b9ccab814019368852cc632af2e53a0993b3b35e417cb3ea473d01a2b8bfe" gracePeriod=30 Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.574795 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerStarted","Data":"fcace78d1b1e66c2fffddc02d605118279839dca308bf6e560fffedfac4d2de6"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.574949 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-74dcc94bb9-qjv6x" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon" containerID="cri-o://fcace78d1b1e66c2fffddc02d605118279839dca308bf6e560fffedfac4d2de6" gracePeriod=30 Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.594138 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rhqss" podStartSLOduration=25.594115135 podStartE2EDuration="25.594115135s" podCreationTimestamp="2025-11-25 12:46:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:28.540144612 +0000 UTC m=+1133.711736953" watchObservedRunningTime="2025-11-25 12:46:28.594115135 +0000 UTC m=+1133.765707486" Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.603510 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-slqsb" podStartSLOduration=23.603493969 podStartE2EDuration="23.603493969s" podCreationTimestamp="2025-11-25 12:46:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:28.577728582 +0000 UTC m=+1133.749320924" watchObservedRunningTime="2025-11-25 12:46:28.603493969 +0000 UTC m=+1133.775086310" Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.609021 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-mj5tw" event={"ID":"92b6601f-6d94-4208-843b-a0fe1aac75ed","Type":"ContainerStarted","Data":"e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.622648 4675 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerStarted","Data":"3db6d63e7ae7145da0caa493a0d83e6641ba8cb92ae898c9ab06ec99ab8d7276"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.630941 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-74dcc94bb9-qjv6x" podStartSLOduration=5.417450967 podStartE2EDuration="33.6309215s" podCreationTimestamp="2025-11-25 12:45:55 +0000 UTC" firstStartedPulling="2025-11-25 12:45:57.199052285 +0000 UTC m=+1102.370644626" lastFinishedPulling="2025-11-25 12:46:25.412522818 +0000 UTC m=+1130.584115159" observedRunningTime="2025-11-25 12:46:28.616645036 +0000 UTC m=+1133.788237387" watchObservedRunningTime="2025-11-25 12:46:28.6309215 +0000 UTC m=+1133.802513841" Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.649784 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerStarted","Data":"7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.649883 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerStarted","Data":"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.649893 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerStarted","Data":"764f18847fe6852a3dbce69e26be8f51a0ecefc23fd9dfba73ff8543c2406f20"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.700241 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerStarted","Data":"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c"} Nov 25 12:46:28 crc kubenswrapper[4675]: I1125 12:46:28.700546 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6df5497f4d-4g9tv" podStartSLOduration=28.70052 podStartE2EDuration="28.70052s" podCreationTimestamp="2025-11-25 12:46:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:28.682208896 +0000 UTC m=+1133.853801237" watchObservedRunningTime="2025-11-25 12:46:28.70052 +0000 UTC m=+1133.872112341" Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.716396 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerStarted","Data":"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107"} Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.716898 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-log" containerID="cri-o://5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" gracePeriod=30 Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.717354 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" 
podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-httpd" containerID="cri-o://fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" gracePeriod=30 Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.726242 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerStarted","Data":"fbddd47512880ae7eb09023ee3ee2a541f1b4a0a05a9ed04baa08307ec5af807"} Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.730940 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerStarted","Data":"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f"} Nov 25 12:46:29 crc kubenswrapper[4675]: I1125 12:46:29.759255 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=30.759237464 podStartE2EDuration="30.759237464s" podCreationTimestamp="2025-11-25 12:45:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:29.7535802 +0000 UTC m=+1134.925172561" watchObservedRunningTime="2025-11-25 12:46:29.759237464 +0000 UTC m=+1134.930829805" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.469573 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573333 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573393 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573462 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573512 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wmpn\" (UniqueName: \"kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573546 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573586 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573612 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.573656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs\") pod \"50bd5686-96db-425f-a4c9-10e84703f8fe\" (UID: \"50bd5686-96db-425f-a4c9-10e84703f8fe\") " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.574627 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.575033 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs" (OuterVolumeSpecName: "logs") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.575937 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.575955 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50bd5686-96db-425f-a4c9-10e84703f8fe-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.581105 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn" (OuterVolumeSpecName: "kube-api-access-2wmpn") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "kube-api-access-2wmpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.585866 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.594142 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts" (OuterVolumeSpecName: "scripts") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.630647 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.677535 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.677585 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.677600 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.677615 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wmpn\" (UniqueName: \"kubernetes.io/projected/50bd5686-96db-425f-a4c9-10e84703f8fe-kube-api-access-2wmpn\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.687611 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data" (OuterVolumeSpecName: "config-data") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.707514 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.716186 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "50bd5686-96db-425f-a4c9-10e84703f8fe" (UID: "50bd5686-96db-425f-a4c9-10e84703f8fe"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.753679 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerStarted","Data":"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe"} Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.761932 4675 generic.go:334] "Generic (PLEG): container finished" podID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerID="fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" exitCode=143 Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.761969 4675 generic.go:334] "Generic (PLEG): container finished" podID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerID="5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" exitCode=143 Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.761996 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerDied","Data":"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107"} Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.762027 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerDied","Data":"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c"} Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.762041 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"50bd5686-96db-425f-a4c9-10e84703f8fe","Type":"ContainerDied","Data":"2c8bdb86d5ad1f65bbe443063115202b34f6790dce64403706e6f2f77344d0be"} Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.762059 4675 scope.go:117] "RemoveContainer" containerID="fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.762213 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.779030 4675 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.779063 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bd5686-96db-425f-a4c9-10e84703f8fe-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.779077 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.779886 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=26.779876091 podStartE2EDuration="26.779876091s" podCreationTimestamp="2025-11-25 12:46:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:30.777447761 +0000 UTC m=+1135.949040102" watchObservedRunningTime="2025-11-25 12:46:30.779876091 +0000 UTC m=+1135.951468432" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.812939 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.825512 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853064 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:30 crc kubenswrapper[4675]: E1125 12:46:30.853449 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853465 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" Nov 25 12:46:30 crc kubenswrapper[4675]: E1125 12:46:30.853478 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-httpd" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853484 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-httpd" Nov 25 12:46:30 crc kubenswrapper[4675]: E1125 12:46:30.853503 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-log" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853509 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-log" Nov 25 12:46:30 crc kubenswrapper[4675]: E1125 12:46:30.853523 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="init" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853528 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="init" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853695 4675 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="bce12a31-015a-4f94-9d41-46ad86692cc0" containerName="dnsmasq-dns" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853717 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-log" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.853729 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" containerName="glance-httpd" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.854666 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.858681 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.859143 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.873156 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.894464 4675 scope.go:117] "RemoveContainer" containerID="5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985001 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985237 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985254 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985273 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985318 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985361 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-w9gj4\" (UniqueName: \"kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985594 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:30 crc kubenswrapper[4675]: I1125 12:46:30.985618 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.013985 4675 scope.go:117] "RemoveContainer" containerID="fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" Nov 25 12:46:31 crc kubenswrapper[4675]: E1125 12:46:31.015499 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107\": container with ID starting with fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107 not found: ID does not exist" containerID="fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.015529 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107"} err="failed to get container status \"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107\": rpc error: code = NotFound desc = could not find container \"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107\": container with ID starting with fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107 not found: ID does not exist" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.015552 4675 scope.go:117] "RemoveContainer" containerID="5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" Nov 25 12:46:31 crc kubenswrapper[4675]: E1125 12:46:31.016157 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c\": container with ID starting with 5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c not found: ID does not exist" containerID="5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.016180 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c"} err="failed to get container status \"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c\": rpc error: code = NotFound desc = could not find container \"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c\": container with ID starting with 5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c not found: ID does not exist" Nov 25 12:46:31 crc 
kubenswrapper[4675]: I1125 12:46:31.016195 4675 scope.go:117] "RemoveContainer" containerID="fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.017355 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107"} err="failed to get container status \"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107\": rpc error: code = NotFound desc = could not find container \"fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107\": container with ID starting with fc866cd3199ef83d8fb2a5acb41ff7bfa3dc60fe4cb45df9fd3fb2e1dfbcf107 not found: ID does not exist" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.017373 4675 scope.go:117] "RemoveContainer" containerID="5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.018995 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c"} err="failed to get container status \"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c\": rpc error: code = NotFound desc = could not find container \"5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c\": container with ID starting with 5814e0d402ebe0face557ec9629772bf6d01d7fddff642720e2c0efdc023d05c not found: ID does not exist" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.086968 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087036 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9gj4\" (UniqueName: \"kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087079 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087111 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087156 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087182 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087195 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087215 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087539 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.087873 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.090286 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.098019 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.113531 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9gj4\" (UniqueName: \"kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.142492 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.148653 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.162801 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.176135 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.218226 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.253305 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.253630 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.374692 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.376045 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.544128 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50bd5686-96db-425f-a4c9-10e84703f8fe" path="/var/lib/kubelet/pods/50bd5686-96db-425f-a4c9-10e84703f8fe/volumes" Nov 25 12:46:31 crc kubenswrapper[4675]: I1125 12:46:31.821430 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:46:31 crc kubenswrapper[4675]: W1125 12:46:31.843459 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6f055cd_061d_43e5_8645_e351a7558608.slice/crio-ed6048ecb1edf39a412bc459cfe0777b9de4e7d9823f4ed58a287a6a1910bfbb WatchSource:0}: Error finding container ed6048ecb1edf39a412bc459cfe0777b9de4e7d9823f4ed58a287a6a1910bfbb: Status 404 returned error can't find the container with id ed6048ecb1edf39a412bc459cfe0777b9de4e7d9823f4ed58a287a6a1910bfbb Nov 25 12:46:32 crc kubenswrapper[4675]: I1125 12:46:32.789349 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerStarted","Data":"ed6048ecb1edf39a412bc459cfe0777b9de4e7d9823f4ed58a287a6a1910bfbb"} Nov 25 12:46:32 crc kubenswrapper[4675]: I1125 12:46:32.793526 4675 generic.go:334] "Generic (PLEG): container finished" podID="eba420bb-9044-4e38-bcd8-11e51c903cac" containerID="5616dc35c507a1d4da0cb3cfce369d6221b6fbf4c34065e4717fc0016d763fae" exitCode=0 Nov 25 12:46:32 crc kubenswrapper[4675]: I1125 12:46:32.793578 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld5wn" 
event={"ID":"eba420bb-9044-4e38-bcd8-11e51c903cac","Type":"ContainerDied","Data":"5616dc35c507a1d4da0cb3cfce369d6221b6fbf4c34065e4717fc0016d763fae"} Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.632537 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.633959 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.633983 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.634069 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.709047 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:34 crc kubenswrapper[4675]: I1125 12:46:34.709440 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.090032 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld5wn" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.169685 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data\") pod \"eba420bb-9044-4e38-bcd8-11e51c903cac\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.169765 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") pod \"eba420bb-9044-4e38-bcd8-11e51c903cac\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.169908 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts\") pod \"eba420bb-9044-4e38-bcd8-11e51c903cac\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.169937 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle\") pod \"eba420bb-9044-4e38-bcd8-11e51c903cac\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.170033 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs\") pod \"eba420bb-9044-4e38-bcd8-11e51c903cac\" (UID: \"eba420bb-9044-4e38-bcd8-11e51c903cac\") " Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.171162 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs" (OuterVolumeSpecName: "logs") pod "eba420bb-9044-4e38-bcd8-11e51c903cac" (UID: "eba420bb-9044-4e38-bcd8-11e51c903cac"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.180064 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts" (OuterVolumeSpecName: "scripts") pod "eba420bb-9044-4e38-bcd8-11e51c903cac" (UID: "eba420bb-9044-4e38-bcd8-11e51c903cac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.180086 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb" (OuterVolumeSpecName: "kube-api-access-ncjkb") pod "eba420bb-9044-4e38-bcd8-11e51c903cac" (UID: "eba420bb-9044-4e38-bcd8-11e51c903cac"). InnerVolumeSpecName "kube-api-access-ncjkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.199965 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data" (OuterVolumeSpecName: "config-data") pod "eba420bb-9044-4e38-bcd8-11e51c903cac" (UID: "eba420bb-9044-4e38-bcd8-11e51c903cac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.214517 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eba420bb-9044-4e38-bcd8-11e51c903cac" (UID: "eba420bb-9044-4e38-bcd8-11e51c903cac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.272232 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.272259 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncjkb\" (UniqueName: \"kubernetes.io/projected/eba420bb-9044-4e38-bcd8-11e51c903cac-kube-api-access-ncjkb\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.272271 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.272280 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eba420bb-9044-4e38-bcd8-11e51c903cac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.272288 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eba420bb-9044-4e38-bcd8-11e51c903cac-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.819128 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld5wn" event={"ID":"eba420bb-9044-4e38-bcd8-11e51c903cac","Type":"ContainerDied","Data":"a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e"} Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.820091 4675 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="a3d3434fe321274a1f453e52cd3c38529c256414a94b8e30ba1cb49e09532f7e" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.820229 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld5wn" Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.825080 4675 generic.go:334] "Generic (PLEG): container finished" podID="3ddf9f0c-7045-4afe-945e-6c6f04f3e699" containerID="aba56ca1748ab9c460cc6b34d2d76f9c5f1cb56802ac688e8e0672d2a9b6ccbc" exitCode=0 Nov 25 12:46:35 crc kubenswrapper[4675]: I1125 12:46:35.825307 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-slqsb" event={"ID":"3ddf9f0c-7045-4afe-945e-6c6f04f3e699","Type":"ContainerDied","Data":"aba56ca1748ab9c460cc6b34d2d76f9c5f1cb56802ac688e8e0672d2a9b6ccbc"} Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.212445 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5bf4848886-8rwx5"] Nov 25 12:46:36 crc kubenswrapper[4675]: E1125 12:46:36.213036 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba420bb-9044-4e38-bcd8-11e51c903cac" containerName="placement-db-sync" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.213053 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba420bb-9044-4e38-bcd8-11e51c903cac" containerName="placement-db-sync" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.213230 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="eba420bb-9044-4e38-bcd8-11e51c903cac" containerName="placement-db-sync" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.214716 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.220273 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.220526 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.220634 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.220735 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hdnd8" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.220863 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.240375 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bf4848886-8rwx5"] Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295323 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-public-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295394 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpxcs\" (UniqueName: \"kubernetes.io/projected/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-kube-api-access-rpxcs\") pod \"placement-5bf4848886-8rwx5\" (UID: 
\"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295433 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-scripts\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295469 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-combined-ca-bundle\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295505 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-logs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295577 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-internal-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.295602 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-config-data\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399004 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-public-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399074 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpxcs\" (UniqueName: \"kubernetes.io/projected/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-kube-api-access-rpxcs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399114 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-scripts\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399158 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-combined-ca-bundle\") pod \"placement-5bf4848886-8rwx5\" (UID: 
\"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399201 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-logs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399859 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-internal-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.399891 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-config-data\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.402978 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-logs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.405122 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-public-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.408849 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-combined-ca-bundle\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.409294 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-internal-tls-certs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.409388 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-config-data\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.411237 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-scripts\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.430707 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rpxcs\" (UniqueName: \"kubernetes.io/projected/1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34-kube-api-access-rpxcs\") pod \"placement-5bf4848886-8rwx5\" (UID: \"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34\") " pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.431018 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:46:36 crc kubenswrapper[4675]: I1125 12:46:36.580434 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:41 crc kubenswrapper[4675]: I1125 12:46:41.078844 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:41 crc kubenswrapper[4675]: I1125 12:46:41.079563 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:46:41 crc kubenswrapper[4675]: I1125 12:46:41.095522 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 12:46:41 crc kubenswrapper[4675]: I1125 12:46:41.260052 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:46:41 crc kubenswrapper[4675]: I1125 12:46:41.378326 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.078688 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.102897 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.102957 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw4sj\" (UniqueName: \"kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.102979 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.103104 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.103148 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.103167 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle\") pod \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\" (UID: \"3ddf9f0c-7045-4afe-945e-6c6f04f3e699\") " Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.119100 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj" (OuterVolumeSpecName: "kube-api-access-rw4sj") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "kube-api-access-rw4sj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.119272 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.140554 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.141041 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts" (OuterVolumeSpecName: "scripts") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.184308 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.214780 4675 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.214807 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.214831 4675 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.214840 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw4sj\" (UniqueName: \"kubernetes.io/projected/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-kube-api-access-rw4sj\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.214850 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.250922 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data" (OuterVolumeSpecName: "config-data") pod "3ddf9f0c-7045-4afe-945e-6c6f04f3e699" (UID: "3ddf9f0c-7045-4afe-945e-6c6f04f3e699"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.315594 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddf9f0c-7045-4afe-945e-6c6f04f3e699-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.721044 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bf4848886-8rwx5"] Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.921915 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-slqsb" event={"ID":"3ddf9f0c-7045-4afe-945e-6c6f04f3e699","Type":"ContainerDied","Data":"67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63"} Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.921950 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-slqsb" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.921952 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67f56ab102bdfa65f0c4a537cfd3e7a3d0c322020174c61c7688ed0a959f8b63" Nov 25 12:46:42 crc kubenswrapper[4675]: I1125 12:46:42.936137 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf4848886-8rwx5" event={"ID":"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34","Type":"ContainerStarted","Data":"7e09a0f634381d698073197273bde7a3103ad42244787a0030cc15198def1cfb"} Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.309488 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-686987849d-794s5"] Nov 25 12:46:43 crc kubenswrapper[4675]: E1125 12:46:43.310169 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ddf9f0c-7045-4afe-945e-6c6f04f3e699" containerName="keystone-bootstrap" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.310183 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddf9f0c-7045-4afe-945e-6c6f04f3e699" containerName="keystone-bootstrap" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.310391 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ddf9f0c-7045-4afe-945e-6c6f04f3e699" containerName="keystone-bootstrap" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.310910 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.322854 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.323480 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.323666 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-69vb7" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.323868 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.323996 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.328317 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.340477 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-686987849d-794s5"] Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371470 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-internal-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371514 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-combined-ca-bundle\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc 
kubenswrapper[4675]: I1125 12:46:43.371536 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-scripts\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371553 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54jbf\" (UniqueName: \"kubernetes.io/projected/ef21ec22-c1e4-490d-b59c-8ffec71be972-kube-api-access-54jbf\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371601 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-config-data\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371630 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-public-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371645 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-credential-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.371684 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-fernet-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.472919 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-fernet-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.472992 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-internal-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.473017 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-combined-ca-bundle\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc 
kubenswrapper[4675]: I1125 12:46:43.473037 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-scripts\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.473057 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54jbf\" (UniqueName: \"kubernetes.io/projected/ef21ec22-c1e4-490d-b59c-8ffec71be972-kube-api-access-54jbf\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.473108 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-config-data\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.473136 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-public-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.473154 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-credential-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.492571 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-internal-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.494712 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-credential-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.495411 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-combined-ca-bundle\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.496961 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-public-tls-certs\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.497860 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-scripts\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.498078 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-config-data\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.499594 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ef21ec22-c1e4-490d-b59c-8ffec71be972-fernet-keys\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.502209 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54jbf\" (UniqueName: \"kubernetes.io/projected/ef21ec22-c1e4-490d-b59c-8ffec71be972-kube-api-access-54jbf\") pod \"keystone-686987849d-794s5\" (UID: \"ef21ec22-c1e4-490d-b59c-8ffec71be972\") " pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.663483 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.663540 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.663588 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.664365 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.664440 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54" gracePeriod=600 Nov 25 12:46:43 crc kubenswrapper[4675]: I1125 12:46:43.715327 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.014301 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerStarted","Data":"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.018996 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rxzpl" event={"ID":"8ee67608-bfaa-407c-9256-488729244fe0","Type":"ContainerStarted","Data":"d22778495631214cfd4c1334c0b28ef3bb60257cff4de3ee737bd9542c4e93e8"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.048931 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-mj5tw" event={"ID":"92b6601f-6d94-4208-843b-a0fe1aac75ed","Type":"ContainerStarted","Data":"1dc687f07b67ce27514732f4d864510d41ed685d9cd44255eb7b27574caccc55"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.082976 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-rxzpl" podStartSLOduration=4.845530736 podStartE2EDuration="53.082955517s" podCreationTimestamp="2025-11-25 12:45:51 +0000 UTC" firstStartedPulling="2025-11-25 12:45:54.018481079 +0000 UTC m=+1099.190073420" lastFinishedPulling="2025-11-25 12:46:42.25590586 +0000 UTC m=+1147.427498201" observedRunningTime="2025-11-25 12:46:44.049159599 +0000 UTC m=+1149.220751940" watchObservedRunningTime="2025-11-25 12:46:44.082955517 +0000 UTC m=+1149.254547878" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.097025 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54" exitCode=0 Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.097107 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.097140 4675 scope.go:117] "RemoveContainer" containerID="cc913b0681d35b11dd746803fa5089245c866bc325aff60631a14bc726556c0f" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.102414 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-mj5tw" podStartSLOduration=26.622542305 podStartE2EDuration="41.102394138s" podCreationTimestamp="2025-11-25 12:46:03 +0000 UTC" firstStartedPulling="2025-11-25 12:46:27.689676282 +0000 UTC m=+1132.861268623" lastFinishedPulling="2025-11-25 12:46:42.169528115 +0000 UTC m=+1147.341120456" observedRunningTime="2025-11-25 12:46:44.085954965 +0000 UTC m=+1149.257547306" watchObservedRunningTime="2025-11-25 12:46:44.102394138 +0000 UTC m=+1149.273986479" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.103182 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerStarted","Data":"a8565f5ee9984ba9431c42c09166778b0746f2f0dcdc0c2e7b249a6e14b293ef"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.112858 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf4848886-8rwx5" 
event={"ID":"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34","Type":"ContainerStarted","Data":"c7b5a6dd7497fecb3c52b6b9cc557ccce323c39c99eface0359cf655d7c2c296"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.112935 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf4848886-8rwx5" event={"ID":"1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34","Type":"ContainerStarted","Data":"f4cd7c4621e243cf2c549919d50445be6edc10050f6214dbebfa8b0550ab3799"} Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.114478 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.114514 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.148221 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5bf4848886-8rwx5" podStartSLOduration=8.148204265 podStartE2EDuration="8.148204265s" podCreationTimestamp="2025-11-25 12:46:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:44.136739663 +0000 UTC m=+1149.308332014" watchObservedRunningTime="2025-11-25 12:46:44.148204265 +0000 UTC m=+1149.319796606" Nov 25 12:46:44 crc kubenswrapper[4675]: I1125 12:46:44.573254 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-686987849d-794s5"] Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.128155 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-686987849d-794s5" event={"ID":"ef21ec22-c1e4-490d-b59c-8ffec71be972","Type":"ContainerStarted","Data":"aeaabccb36b64ff701f5fe404cc6640c4a856faaf62507207dbcd12004604ffd"} Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.128493 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-686987849d-794s5" event={"ID":"ef21ec22-c1e4-490d-b59c-8ffec71be972","Type":"ContainerStarted","Data":"aaa43b90ed707dd5a36236e0c40bb13f592927769046241d31a69b07dce4364c"} Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.128665 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-686987849d-794s5" Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.134851 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerStarted","Data":"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d"} Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.147073 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c"} Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.163390 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-686987849d-794s5" podStartSLOduration=2.163342224 podStartE2EDuration="2.163342224s" podCreationTimestamp="2025-11-25 12:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:45.154525598 +0000 UTC m=+1150.326117959" watchObservedRunningTime="2025-11-25 12:46:45.163342224 +0000 UTC 
m=+1150.334934575" Nov 25 12:46:45 crc kubenswrapper[4675]: I1125 12:46:45.204031 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=15.204012804 podStartE2EDuration="15.204012804s" podCreationTimestamp="2025-11-25 12:46:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:45.201267365 +0000 UTC m=+1150.372859726" watchObservedRunningTime="2025-11-25 12:46:45.204012804 +0000 UTC m=+1150.375605135" Nov 25 12:46:50 crc kubenswrapper[4675]: I1125 12:46:50.202072 4675 generic.go:334] "Generic (PLEG): container finished" podID="92b6601f-6d94-4208-843b-a0fe1aac75ed" containerID="1dc687f07b67ce27514732f4d864510d41ed685d9cd44255eb7b27574caccc55" exitCode=0 Nov 25 12:46:50 crc kubenswrapper[4675]: I1125 12:46:50.202154 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-mj5tw" event={"ID":"92b6601f-6d94-4208-843b-a0fe1aac75ed","Type":"ContainerDied","Data":"1dc687f07b67ce27514732f4d864510d41ed685d9cd44255eb7b27574caccc55"} Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.219167 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.219623 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.258054 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.269650 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.276021 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 12:46:51 crc kubenswrapper[4675]: I1125 12:46:51.375511 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:46:52 crc kubenswrapper[4675]: I1125 12:46:52.225318 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 12:46:52 crc kubenswrapper[4675]: I1125 12:46:52.225615 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.864119 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.957630 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data\") pod \"92b6601f-6d94-4208-843b-a0fe1aac75ed\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.957745 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgt9g\" (UniqueName: \"kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g\") pod \"92b6601f-6d94-4208-843b-a0fe1aac75ed\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.957794 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle\") pod \"92b6601f-6d94-4208-843b-a0fe1aac75ed\" (UID: \"92b6601f-6d94-4208-843b-a0fe1aac75ed\") " Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.967330 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "92b6601f-6d94-4208-843b-a0fe1aac75ed" (UID: "92b6601f-6d94-4208-843b-a0fe1aac75ed"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:53 crc kubenswrapper[4675]: I1125 12:46:53.973695 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g" (OuterVolumeSpecName: "kube-api-access-lgt9g") pod "92b6601f-6d94-4208-843b-a0fe1aac75ed" (UID: "92b6601f-6d94-4208-843b-a0fe1aac75ed"). InnerVolumeSpecName "kube-api-access-lgt9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.033945 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "92b6601f-6d94-4208-843b-a0fe1aac75ed" (UID: "92b6601f-6d94-4208-843b-a0fe1aac75ed"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.060849 4675 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.060876 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgt9g\" (UniqueName: \"kubernetes.io/projected/92b6601f-6d94-4208-843b-a0fe1aac75ed-kube-api-access-lgt9g\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.060888 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b6601f-6d94-4208-843b-a0fe1aac75ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:46:54 crc kubenswrapper[4675]: E1125 12:46:54.160099 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.255857 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-mj5tw" event={"ID":"92b6601f-6d94-4208-843b-a0fe1aac75ed","Type":"ContainerDied","Data":"e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b"} Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.255901 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e20253d95d6d04ac22794290b143a64adbf089f6c19e5822887e0283f162310b" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.255964 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-mj5tw" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.262077 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerStarted","Data":"c33fbcc1db60efb99283d727b7b7f96527d630071fbd673998b4e38f14c58f9e"} Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.262342 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="ceilometer-notification-agent" containerID="cri-o://fbddd47512880ae7eb09023ee3ee2a541f1b4a0a05a9ed04baa08307ec5af807" gracePeriod=30 Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.262362 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="proxy-httpd" containerID="cri-o://c33fbcc1db60efb99283d727b7b7f96527d630071fbd673998b4e38f14c58f9e" gracePeriod=30 Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.262388 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.262461 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="sg-core" containerID="cri-o://a8565f5ee9984ba9431c42c09166778b0746f2f0dcdc0c2e7b249a6e14b293ef" gracePeriod=30 Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.612685 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.612782 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:46:54 crc kubenswrapper[4675]: I1125 12:46:54.836985 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.118382 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5f59d94c69-6bgtx"] Nov 25 12:46:55 crc kubenswrapper[4675]: E1125 12:46:55.119383 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92b6601f-6d94-4208-843b-a0fe1aac75ed" containerName="barbican-db-sync" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.119403 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="92b6601f-6d94-4208-843b-a0fe1aac75ed" containerName="barbican-db-sync" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.120166 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="92b6601f-6d94-4208-843b-a0fe1aac75ed" containerName="barbican-db-sync" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.121988 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.139899 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.140117 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8gfpq" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.140282 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.214057 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-858b4645fd-rr59w"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.219149 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.228977 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.231518 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f59d94c69-6bgtx"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.265008 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-858b4645fd-rr59w"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.284791 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.284850 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-logs\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.284880 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zstxk\" (UniqueName: \"kubernetes.io/projected/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-kube-api-access-zstxk\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.284953 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data-custom\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.284988 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-combined-ca-bundle\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " 
pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.303178 4675 generic.go:334] "Generic (PLEG): container finished" podID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerID="c33fbcc1db60efb99283d727b7b7f96527d630071fbd673998b4e38f14c58f9e" exitCode=0 Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.303232 4675 generic.go:334] "Generic (PLEG): container finished" podID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerID="a8565f5ee9984ba9431c42c09166778b0746f2f0dcdc0c2e7b249a6e14b293ef" exitCode=2 Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.303281 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerDied","Data":"c33fbcc1db60efb99283d727b7b7f96527d630071fbd673998b4e38f14c58f9e"} Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.303338 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerDied","Data":"a8565f5ee9984ba9431c42c09166778b0746f2f0dcdc0c2e7b249a6e14b293ef"} Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.317121 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.318574 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.343042 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.386891 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data-custom\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.386947 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-combined-ca-bundle\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.386974 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hspjp\" (UniqueName: \"kubernetes.io/projected/a4640f4e-98fe-438c-bc12-38c11c62f997-kube-api-access-hspjp\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.386997 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-combined-ca-bundle\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.387027 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a4640f4e-98fe-438c-bc12-38c11c62f997-logs\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.387059 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.387091 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.388125 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-logs\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.388226 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zstxk\" (UniqueName: \"kubernetes.io/projected/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-kube-api-access-zstxk\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.388382 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data-custom\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.388502 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-logs\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.396231 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-combined-ca-bundle\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.417653 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data-custom\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.432843 4675 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.438866 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.441731 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zstxk\" (UniqueName: \"kubernetes.io/projected/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-kube-api-access-zstxk\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.441989 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/654bfacb-d4b4-45a3-ae90-92496e1b5e9e-config-data\") pod \"barbican-worker-5f59d94c69-6bgtx\" (UID: \"654bfacb-d4b4-45a3-ae90-92496e1b5e9e\") " pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.452669 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.462214 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.483174 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5f59d94c69-6bgtx" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.490977 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491056 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k92k5\" (UniqueName: \"kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491086 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hspjp\" (UniqueName: \"kubernetes.io/projected/a4640f4e-98fe-438c-bc12-38c11c62f997-kube-api-access-hspjp\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491109 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-combined-ca-bundle\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491161 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4640f4e-98fe-438c-bc12-38c11c62f997-logs\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: 
\"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491215 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491289 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491311 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491365 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data-custom\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491398 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.491433 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.500064 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a4640f4e-98fe-438c-bc12-38c11c62f997-logs\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.508979 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-combined-ca-bundle\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.512673 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hspjp\" (UniqueName: 
\"kubernetes.io/projected/a4640f4e-98fe-438c-bc12-38c11c62f997-kube-api-access-hspjp\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.520208 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.523541 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a4640f4e-98fe-438c-bc12-38c11c62f997-config-data-custom\") pod \"barbican-keystone-listener-858b4645fd-rr59w\" (UID: \"a4640f4e-98fe-438c-bc12-38c11c62f997\") " pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.552177 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593209 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593577 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593601 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593619 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593641 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpmhf\" (UniqueName: \"kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593678 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data\") pod 
\"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593701 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593732 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593757 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593799 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k92k5\" (UniqueName: \"kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.593856 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.594677 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.595211 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.595758 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.596289 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " 
pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.596801 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.632960 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k92k5\" (UniqueName: \"kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5\") pod \"dnsmasq-dns-586bdc5f9-wkql5\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.644308 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.695211 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.695318 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.695342 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.695399 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.695417 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpmhf\" (UniqueName: \"kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.698202 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.703131 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.703503 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.716914 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.725363 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpmhf\" (UniqueName: \"kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf\") pod \"barbican-api-74b4cf8674-4p2tv\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:55 crc kubenswrapper[4675]: I1125 12:46:55.881214 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:56 crc kubenswrapper[4675]: I1125 12:46:56.203338 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f59d94c69-6bgtx"] Nov 25 12:46:56 crc kubenswrapper[4675]: I1125 12:46:56.320106 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f59d94c69-6bgtx" event={"ID":"654bfacb-d4b4-45a3-ae90-92496e1b5e9e","Type":"ContainerStarted","Data":"f0ab3b3a652ac218b770e16ead78903b819cb15473dd78a02036b5c15a30b753"} Nov 25 12:46:56 crc kubenswrapper[4675]: I1125 12:46:56.392104 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-858b4645fd-rr59w"] Nov 25 12:46:56 crc kubenswrapper[4675]: I1125 12:46:56.529257 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:46:56 crc kubenswrapper[4675]: W1125 12:46:56.534556 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b2bb9c7_3d3a_4d45_ad42_2f422dea1c40.slice/crio-6ece05da366ad51570592627c0a474fb593df26fdc79912d39812e1e82f0cb1e WatchSource:0}: Error finding container 6ece05da366ad51570592627c0a474fb593df26fdc79912d39812e1e82f0cb1e: Status 404 returned error can't find the container with id 6ece05da366ad51570592627c0a474fb593df26fdc79912d39812e1e82f0cb1e Nov 25 12:46:56 crc kubenswrapper[4675]: I1125 12:46:56.554575 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.330360 4675 generic.go:334] "Generic (PLEG): container finished" podID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerID="d852a34d95acce5c24dd6010693a7446972d094bc314f1a279652d7d72016fb2" exitCode=0 Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.330469 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" 
event={"ID":"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40","Type":"ContainerDied","Data":"d852a34d95acce5c24dd6010693a7446972d094bc314f1a279652d7d72016fb2"} Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.330740 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" event={"ID":"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40","Type":"ContainerStarted","Data":"6ece05da366ad51570592627c0a474fb593df26fdc79912d39812e1e82f0cb1e"} Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.344203 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" event={"ID":"a4640f4e-98fe-438c-bc12-38c11c62f997","Type":"ContainerStarted","Data":"1b3373050cd55b22c1e9e1290c2be8acaecbec8a5aee75b4ffd462166e152f43"} Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.346704 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerStarted","Data":"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521"} Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.346732 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerStarted","Data":"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1"} Nov 25 12:46:57 crc kubenswrapper[4675]: I1125 12:46:57.346742 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerStarted","Data":"df9cd2a04d443ec88bc3867de4eda5cd567d71c2a167892cadaa9f4d1a51e098"} Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.271348 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7ff8694d7d-s9pzw"] Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.273045 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.275373 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.275570 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.340912 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7ff8694d7d-s9pzw"] Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.364798 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" event={"ID":"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40","Type":"ContainerStarted","Data":"b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de"} Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.365653 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.365699 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.365729 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374128 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data-custom\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374239 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374295 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51325415-d3b2-4852-bdce-6861cd1dc391-logs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374336 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bclxj\" (UniqueName: \"kubernetes.io/projected/51325415-d3b2-4852-bdce-6861cd1dc391-kube-api-access-bclxj\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374385 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-internal-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374418 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-combined-ca-bundle\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.374454 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-public-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.394104 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-74b4cf8674-4p2tv" podStartSLOduration=3.3940889309999998 podStartE2EDuration="3.394088931s" podCreationTimestamp="2025-11-25 12:46:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:58.390039779 +0000 UTC m=+1163.561632140" watchObservedRunningTime="2025-11-25 12:46:58.394088931 +0000 UTC m=+1163.565681272" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.417781 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" podStartSLOduration=3.417761219 podStartE2EDuration="3.417761219s" podCreationTimestamp="2025-11-25 12:46:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:46:58.415123044 +0000 UTC m=+1163.586715385" watchObservedRunningTime="2025-11-25 12:46:58.417761219 +0000 UTC m=+1163.589353570" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476140 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476305 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51325415-d3b2-4852-bdce-6861cd1dc391-logs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476387 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bclxj\" (UniqueName: \"kubernetes.io/projected/51325415-d3b2-4852-bdce-6861cd1dc391-kube-api-access-bclxj\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476461 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-internal-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476543 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-combined-ca-bundle\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476618 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-public-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.476867 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data-custom\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.482958 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51325415-d3b2-4852-bdce-6861cd1dc391-logs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.485587 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.487468 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-public-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.487496 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-combined-ca-bundle\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.488037 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-internal-tls-certs\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.490874 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51325415-d3b2-4852-bdce-6861cd1dc391-config-data-custom\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.499505 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bclxj\" (UniqueName: 
\"kubernetes.io/projected/51325415-d3b2-4852-bdce-6861cd1dc391-kube-api-access-bclxj\") pod \"barbican-api-7ff8694d7d-s9pzw\" (UID: \"51325415-d3b2-4852-bdce-6861cd1dc391\") " pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:58 crc kubenswrapper[4675]: I1125 12:46:58.592177 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.377276 4675 generic.go:334] "Generic (PLEG): container finished" podID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerID="fbddd47512880ae7eb09023ee3ee2a541f1b4a0a05a9ed04baa08307ec5af807" exitCode=0 Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.377473 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerDied","Data":"fbddd47512880ae7eb09023ee3ee2a541f1b4a0a05a9ed04baa08307ec5af807"} Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.385202 4675 generic.go:334] "Generic (PLEG): container finished" podID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerID="fcace78d1b1e66c2fffddc02d605118279839dca308bf6e560fffedfac4d2de6" exitCode=137 Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.385227 4675 generic.go:334] "Generic (PLEG): container finished" podID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerID="bf7b9ccab814019368852cc632af2e53a0993b3b35e417cb3ea473d01a2b8bfe" exitCode=137 Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.385244 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerDied","Data":"fcace78d1b1e66c2fffddc02d605118279839dca308bf6e560fffedfac4d2de6"} Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.385272 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerDied","Data":"bf7b9ccab814019368852cc632af2e53a0993b3b35e417cb3ea473d01a2b8bfe"} Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.387417 4675 generic.go:334] "Generic (PLEG): container finished" podID="8ee67608-bfaa-407c-9256-488729244fe0" containerID="d22778495631214cfd4c1334c0b28ef3bb60257cff4de3ee737bd9542c4e93e8" exitCode=0 Nov 25 12:46:59 crc kubenswrapper[4675]: I1125 12:46:59.388153 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rxzpl" event={"ID":"8ee67608-bfaa-407c-9256-488729244fe0","Type":"ContainerDied","Data":"d22778495631214cfd4c1334c0b28ef3bb60257cff4de3ee737bd9542c4e93e8"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.039423 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.107146 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.108003 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.108179 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98q44\" (UniqueName: \"kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.109081 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.109941 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.110123 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.110241 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data\") pod \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\" (UID: \"c38138b1-41d4-4e17-94a8-c1a66a3725c3\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.110926 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.111234 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.114987 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.134761 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44" (OuterVolumeSpecName: "kube-api-access-98q44") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "kube-api-access-98q44". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.143436 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts" (OuterVolumeSpecName: "scripts") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.168202 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.213958 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.219256 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.220496 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.220526 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98q44\" (UniqueName: \"kubernetes.io/projected/c38138b1-41d4-4e17-94a8-c1a66a3725c3-kube-api-access-98q44\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.220536 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38138b1-41d4-4e17-94a8-c1a66a3725c3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.270514 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7ff8694d7d-s9pzw"] Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.310998 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data" (OuterVolumeSpecName: "config-data") pod "c38138b1-41d4-4e17-94a8-c1a66a3725c3" (UID: "c38138b1-41d4-4e17-94a8-c1a66a3725c3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.323408 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.323439 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38138b1-41d4-4e17-94a8-c1a66a3725c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.398743 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.401054 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74dcc94bb9-qjv6x" event={"ID":"e30a18e5-593a-404a-9bc3-0ac55ecf6d94","Type":"ContainerDied","Data":"65acf609d4588c8b4dfbe47b148668cc50cfe409f353f500cb7aadf697f079cb"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.401144 4675 scope.go:117] "RemoveContainer" containerID="fcace78d1b1e66c2fffddc02d605118279839dca308bf6e560fffedfac4d2de6" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.424517 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key\") pod \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.424584 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs\") pod \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.424739 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts\") pod \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.424937 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data\") pod \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.425064 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h68mc\" (UniqueName: \"kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc\") pod \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\" (UID: \"e30a18e5-593a-404a-9bc3-0ac55ecf6d94\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.426693 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs" (OuterVolumeSpecName: "logs") pod "e30a18e5-593a-404a-9bc3-0ac55ecf6d94" (UID: "e30a18e5-593a-404a-9bc3-0ac55ecf6d94"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.435591 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" event={"ID":"a4640f4e-98fe-438c-bc12-38c11c62f997","Type":"ContainerStarted","Data":"3b7274330186b947dfe9088e8efae46fdb7e0170ab430a118bb39053b030c889"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.446041 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f59d94c69-6bgtx" event={"ID":"654bfacb-d4b4-45a3-ae90-92496e1b5e9e","Type":"ContainerStarted","Data":"3d8d3777228080a152e3a01f4e37bfc8350b5603102225a3d73f0cdd5b44b917"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.448419 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ff8694d7d-s9pzw" event={"ID":"51325415-d3b2-4852-bdce-6861cd1dc391","Type":"ContainerStarted","Data":"4d70a58f8d52acc39baf9314f3520558f6eac1431f20e53f78029affde7257d0"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.451753 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc" (OuterVolumeSpecName: "kube-api-access-h68mc") pod "e30a18e5-593a-404a-9bc3-0ac55ecf6d94" (UID: "e30a18e5-593a-404a-9bc3-0ac55ecf6d94"). InnerVolumeSpecName "kube-api-access-h68mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.452602 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38138b1-41d4-4e17-94a8-c1a66a3725c3","Type":"ContainerDied","Data":"3a6aac522d7aade7b31a9237b9b9bbff8b07d2a043cca75c56b8ab27f9f2ab02"} Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.453476 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e30a18e5-593a-404a-9bc3-0ac55ecf6d94" (UID: "e30a18e5-593a-404a-9bc3-0ac55ecf6d94"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.457753 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.532627 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.540308 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h68mc\" (UniqueName: \"kubernetes.io/projected/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-kube-api-access-h68mc\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.540337 4675 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.540351 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.574540 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.591416 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:00 crc kubenswrapper[4675]: E1125 12:47:00.592635 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon-log" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.592659 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon-log" Nov 25 12:47:00 crc kubenswrapper[4675]: E1125 12:47:00.592682 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="proxy-httpd" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.592689 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="proxy-httpd" Nov 25 12:47:00 crc kubenswrapper[4675]: E1125 12:47:00.592715 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="ceilometer-notification-agent" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.592723 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="ceilometer-notification-agent" Nov 25 12:47:00 crc kubenswrapper[4675]: E1125 12:47:00.592743 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="sg-core" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.592749 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="sg-core" Nov 25 12:47:00 crc kubenswrapper[4675]: E1125 12:47:00.592770 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.592777 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.593116 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="ceilometer-notification-agent" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.593145 4675 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="proxy-httpd" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.593166 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" containerName="sg-core" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.593183 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.593198 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" containerName="horizon-log" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.598629 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.600621 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data" (OuterVolumeSpecName: "config-data") pod "e30a18e5-593a-404a-9bc3-0ac55ecf6d94" (UID: "e30a18e5-593a-404a-9bc3-0ac55ecf6d94"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.603206 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.609090 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.609242 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.636917 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts" (OuterVolumeSpecName: "scripts") pod "e30a18e5-593a-404a-9bc3-0ac55ecf6d94" (UID: "e30a18e5-593a-404a-9bc3-0ac55ecf6d94"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.643806 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.643924 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.643990 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6pv5\" (UniqueName: \"kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644013 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644031 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644046 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644067 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644145 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.644158 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30a18e5-593a-404a-9bc3-0ac55ecf6d94-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.709059 4675 scope.go:117] "RemoveContainer" containerID="bf7b9ccab814019368852cc632af2e53a0993b3b35e417cb3ea473d01a2b8bfe" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.749863 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6pv5\" (UniqueName: 
\"kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.749943 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.749975 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.749993 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.750017 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.750102 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.750123 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.750861 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.752077 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.755726 4675 scope.go:117] "RemoveContainer" containerID="c33fbcc1db60efb99283d727b7b7f96527d630071fbd673998b4e38f14c58f9e" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.763481 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 
12:47:00.764019 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.778285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6pv5\" (UniqueName: \"kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.780220 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.781410 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts\") pod \"ceilometer-0\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") " pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.809851 4675 scope.go:117] "RemoveContainer" containerID="a8565f5ee9984ba9431c42c09166778b0746f2f0dcdc0c2e7b249a6e14b293ef" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.849053 4675 scope.go:117] "RemoveContainer" containerID="fbddd47512880ae7eb09023ee3ee2a541f1b4a0a05a9ed04baa08307ec5af807" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.892476 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.944055 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953082 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8sgr\" (UniqueName: \"kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953188 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953242 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953277 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953299 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953379 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id\") pod \"8ee67608-bfaa-407c-9256-488729244fe0\" (UID: \"8ee67608-bfaa-407c-9256-488729244fe0\") " Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.953733 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.959271 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts" (OuterVolumeSpecName: "scripts") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.959338 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr" (OuterVolumeSpecName: "kube-api-access-j8sgr") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "kube-api-access-j8sgr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:00 crc kubenswrapper[4675]: I1125 12:47:00.967191 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.038237 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.059197 4675 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.059438 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.059450 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.059458 4675 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8ee67608-bfaa-407c-9256-488729244fe0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.059469 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8sgr\" (UniqueName: \"kubernetes.io/projected/8ee67608-bfaa-407c-9256-488729244fe0-kube-api-access-j8sgr\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.062940 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data" (OuterVolumeSpecName: "config-data") pod "8ee67608-bfaa-407c-9256-488729244fe0" (UID: "8ee67608-bfaa-407c-9256-488729244fe0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.163694 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee67608-bfaa-407c-9256-488729244fe0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.253474 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.253552 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.254279 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773"} pod="openstack/horizon-6df5497f4d-4g9tv" containerMessage="Container horizon failed startup probe, will be restarted" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.254318 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" containerID="cri-o://7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773" gracePeriod=30 Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.376348 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.376408 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.377089 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f"} pod="openstack/horizon-85d4f84f96-fcncp" containerMessage="Container horizon failed startup probe, will be restarted" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.377128 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" containerID="cri-o://b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f" gracePeriod=30 Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.461078 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rxzpl" event={"ID":"8ee67608-bfaa-407c-9256-488729244fe0","Type":"ContainerDied","Data":"44de3fa9bcb75d8092ea5ce4c52b61fb01e0096a55ea253df053cb93d7fdb571"} Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.461124 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44de3fa9bcb75d8092ea5ce4c52b61fb01e0096a55ea253df053cb93d7fdb571" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.461142 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-rxzpl" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.464031 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f59d94c69-6bgtx" event={"ID":"654bfacb-d4b4-45a3-ae90-92496e1b5e9e","Type":"ContainerStarted","Data":"970146693f8e0c46cc6e35d17b799dbbdaaaf129ce3dad0aba2482fd470c6a4d"} Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.468901 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ff8694d7d-s9pzw" event={"ID":"51325415-d3b2-4852-bdce-6861cd1dc391","Type":"ContainerStarted","Data":"614482c8ce8fc800303829fa3bf6af2ad81bb319dc88aaab617f05870b2a0835"} Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.468937 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ff8694d7d-s9pzw" event={"ID":"51325415-d3b2-4852-bdce-6861cd1dc391","Type":"ContainerStarted","Data":"b56651889c9097362db93d755633590f2f9c24d791861dab22be6441e7f31a72"} Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.469196 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.469253 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.502432 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74dcc94bb9-qjv6x" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.503889 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.514108 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5f59d94c69-6bgtx" podStartSLOduration=3.146146369 podStartE2EDuration="6.514081357s" podCreationTimestamp="2025-11-25 12:46:55 +0000 UTC" firstStartedPulling="2025-11-25 12:46:56.220088197 +0000 UTC m=+1161.391680538" lastFinishedPulling="2025-11-25 12:46:59.588023185 +0000 UTC m=+1164.759615526" observedRunningTime="2025-11-25 12:47:01.486635316 +0000 UTC m=+1166.658227677" watchObservedRunningTime="2025-11-25 12:47:01.514081357 +0000 UTC m=+1166.685673698" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.535677 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" event={"ID":"a4640f4e-98fe-438c-bc12-38c11c62f997","Type":"ContainerStarted","Data":"f97e450e7a23679447721925f4004751075380b1714d0b2eabb8f228f1a99d54"} Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.545079 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7ff8694d7d-s9pzw" podStartSLOduration=3.545060653 podStartE2EDuration="3.545060653s" podCreationTimestamp="2025-11-25 12:46:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:01.536675381 +0000 UTC m=+1166.708267722" watchObservedRunningTime="2025-11-25 12:47:01.545060653 +0000 UTC m=+1166.716652994" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.599415 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-858b4645fd-rr59w" podStartSLOduration=3.371832299 podStartE2EDuration="6.599396888s" podCreationTimestamp="2025-11-25 12:46:55 +0000 UTC" 
firstStartedPulling="2025-11-25 12:46:56.412477416 +0000 UTC m=+1161.584069767" lastFinishedPulling="2025-11-25 12:46:59.640042015 +0000 UTC m=+1164.811634356" observedRunningTime="2025-11-25 12:47:01.580299648 +0000 UTC m=+1166.751891989" watchObservedRunningTime="2025-11-25 12:47:01.599396888 +0000 UTC m=+1166.770989239" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.613367 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c38138b1-41d4-4e17-94a8-c1a66a3725c3" path="/var/lib/kubelet/pods/c38138b1-41d4-4e17-94a8-c1a66a3725c3/volumes" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.636957 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.644925 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-74dcc94bb9-qjv6x"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.777586 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:01 crc kubenswrapper[4675]: E1125 12:47:01.778045 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee67608-bfaa-407c-9256-488729244fe0" containerName="cinder-db-sync" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.778068 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee67608-bfaa-407c-9256-488729244fe0" containerName="cinder-db-sync" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.778303 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ee67608-bfaa-407c-9256-488729244fe0" containerName="cinder-db-sync" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.779366 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.787921 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.788131 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.788258 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bxzgq" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.788352 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.808242 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.882497 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.882923 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="dnsmasq-dns" containerID="cri-o://b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de" gracePeriod=10 Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904541 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" 
Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904621 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904639 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904660 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ccdx\" (UniqueName: \"kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904894 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.904956 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.970796 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"] Nov 25 12:47:01 crc kubenswrapper[4675]: I1125 12:47:01.972165 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.006940 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.006978 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007015 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007050 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007065 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007084 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007100 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007126 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007140 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 
12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007154 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ccdx\" (UniqueName: \"kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007181 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5ccd\" (UniqueName: \"kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.007227 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.011695 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.012713 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"] Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.027337 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.031756 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.045352 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.059221 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.067851 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ccdx\" (UniqueName: \"kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx\") pod \"cinder-scheduler-0\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 
12:47:02.110668 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.109923 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.110726 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.110756 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.110809 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5ccd\" (UniqueName: \"kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.110939 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.110971 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.111542 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.113118 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.113886 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.114340 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.126166 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.176584 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5ccd\" (UniqueName: \"kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd\") pod \"dnsmasq-dns-795f4db4bc-qn76l\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.206883 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.240992 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.256218 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.296942 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.314501 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.366135 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.366466 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.366699 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.366920 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.367066 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.367257 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-472tn\" (UniqueName: \"kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.367417 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.473888 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.475615 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.475772 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.476005 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.476187 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.476282 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.476426 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-472tn\" (UniqueName: \"kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.475518 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.477003 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.481886 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.484777 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.489225 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.489829 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.499088 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-472tn\" (UniqueName: \"kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn\") pod \"cinder-api-0\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " pod="openstack/cinder-api-0" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.599457 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerStarted","Data":"0b71a15353ccb501129618df52f2833ba034333805d002dd4d92840c4e7e05cb"} Nov 25 12:47:02 crc kubenswrapper[4675]: E1125 12:47:02.632868 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b2bb9c7_3d3a_4d45_ad42_2f422dea1c40.slice/crio-conmon-b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de.scope\": RecentStats: unable to find data in memory cache]" Nov 25 12:47:02 crc kubenswrapper[4675]: I1125 12:47:02.761983 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.116499 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.282456 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"] Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.415732 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.597288 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e30a18e5-593a-404a-9bc3-0ac55ecf6d94" path="/var/lib/kubelet/pods/e30a18e5-593a-404a-9bc3-0ac55ecf6d94/volumes" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.633495 4675 generic.go:334] "Generic (PLEG): container finished" podID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerID="b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de" exitCode=0 Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.633560 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" event={"ID":"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40","Type":"ContainerDied","Data":"b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de"} Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.635137 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerStarted","Data":"2ad31b6e5c2df930c610d59e2cec704176713716c90cd4c95575361e7b53304d"} Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.636143 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerStarted","Data":"e9106f991db93c7d7b0a0518fd1a7c7fb0877063988de238b29e0a8a33b92024"} Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.638575 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" 
event={"ID":"15f85be0-29b9-44e3-a88e-d1531192fbe0","Type":"ContainerStarted","Data":"be839c8375ba233e732676f96bce0e82d524ba14ec83df6489d8360d1902e6b8"} Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.687216 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831441 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831505 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k92k5\" (UniqueName: \"kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831556 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831588 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831612 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.831649 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0\") pod \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\" (UID: \"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40\") " Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.863349 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5" (OuterVolumeSpecName: "kube-api-access-k92k5") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). InnerVolumeSpecName "kube-api-access-k92k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.939044 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k92k5\" (UniqueName: \"kubernetes.io/projected/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-kube-api-access-k92k5\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.953576 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.980957 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:03 crc kubenswrapper[4675]: I1125 12:47:03.986033 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.007130 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config" (OuterVolumeSpecName: "config") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.014496 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" (UID: "5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.041178 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.041213 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.041222 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.041232 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.041243 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.659092 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" event={"ID":"5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40","Type":"ContainerDied","Data":"6ece05da366ad51570592627c0a474fb593df26fdc79912d39812e1e82f0cb1e"} Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.659403 4675 scope.go:117] 
"RemoveContainer" containerID="b48f1d8519d6a39686777d3889661141075cf22c62bcd6cc8fddf6c435e6c9de" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.659632 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586bdc5f9-wkql5" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.693758 4675 generic.go:334] "Generic (PLEG): container finished" podID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerID="e875e1a145a79f701b8ed23360286173ac974cbd581f145cec318f86ee5e6025" exitCode=0 Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.693906 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" event={"ID":"15f85be0-29b9-44e3-a88e-d1531192fbe0","Type":"ContainerDied","Data":"e875e1a145a79f701b8ed23360286173ac974cbd581f145cec318f86ee5e6025"} Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.699111 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerStarted","Data":"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604"} Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.788889 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.788954 4675 scope.go:117] "RemoveContainer" containerID="d852a34d95acce5c24dd6010693a7446972d094bc314f1a279652d7d72016fb2" Nov 25 12:47:04 crc kubenswrapper[4675]: I1125 12:47:04.818580 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586bdc5f9-wkql5"] Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.549639 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" path="/var/lib/kubelet/pods/5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40/volumes" Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.725531 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" event={"ID":"15f85be0-29b9-44e3-a88e-d1531192fbe0","Type":"ContainerStarted","Data":"811f46e24f53511dababf0245bef9462c32547c8857e7ebb539e1455ed9de3c3"} Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.726110 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.734908 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerStarted","Data":"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa"} Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.756900 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerStarted","Data":"74e74f15d5046874ac521396d9948db9352772d544f80542597d46b844702d8e"} Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.758006 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerStarted","Data":"37f4b32e8c1bf54ddd8242923fb38d9eb3831675fb999257d91c500a88323bad"} Nov 25 12:47:05 crc kubenswrapper[4675]: I1125 12:47:05.794624 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" podStartSLOduration=4.794606613 podStartE2EDuration="4.794606613s" 
podCreationTimestamp="2025-11-25 12:47:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:05.792131142 +0000 UTC m=+1170.963723493" watchObservedRunningTime="2025-11-25 12:47:05.794606613 +0000 UTC m=+1170.966198954" Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.356843 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.775876 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerStarted","Data":"d6d2367b214b5ab769e48945098118f6bd2450ed6b208711463b4b01b2f6f44c"} Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.776046 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api-log" containerID="cri-o://37f4b32e8c1bf54ddd8242923fb38d9eb3831675fb999257d91c500a88323bad" gracePeriod=30 Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.776284 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.776317 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" containerID="cri-o://d6d2367b214b5ab769e48945098118f6bd2450ed6b208711463b4b01b2f6f44c" gracePeriod=30 Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.786171 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerStarted","Data":"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708"} Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.808717 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.808695267 podStartE2EDuration="4.808695267s" podCreationTimestamp="2025-11-25 12:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:06.805160752 +0000 UTC m=+1171.976753093" watchObservedRunningTime="2025-11-25 12:47:06.808695267 +0000 UTC m=+1171.980287608" Nov 25 12:47:06 crc kubenswrapper[4675]: I1125 12:47:06.925022 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:07 crc kubenswrapper[4675]: I1125 12:47:07.802796 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerStarted","Data":"a5f352c258bf31524bdb9559a6e82952fcdcc96d22f9f1a044cd414198f255e2"} Nov 25 12:47:07 crc kubenswrapper[4675]: I1125 12:47:07.815333 4675 generic.go:334] "Generic (PLEG): container finished" podID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerID="37f4b32e8c1bf54ddd8242923fb38d9eb3831675fb999257d91c500a88323bad" exitCode=143 Nov 25 12:47:07 crc kubenswrapper[4675]: I1125 12:47:07.815386 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerDied","Data":"37f4b32e8c1bf54ddd8242923fb38d9eb3831675fb999257d91c500a88323bad"} Nov 25 12:47:07 crc kubenswrapper[4675]: I1125 12:47:07.826929 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.719838901 podStartE2EDuration="6.826906794s" podCreationTimestamp="2025-11-25 12:47:01 +0000 UTC" firstStartedPulling="2025-11-25 12:47:03.291264914 +0000 UTC m=+1168.462857255" lastFinishedPulling="2025-11-25 12:47:04.398332817 +0000 UTC m=+1169.569925148" observedRunningTime="2025-11-25 12:47:07.821490128 +0000 UTC m=+1172.993082499" watchObservedRunningTime="2025-11-25 12:47:07.826906794 +0000 UTC m=+1172.998499165" Nov 25 12:47:08 crc kubenswrapper[4675]: I1125 12:47:08.826968 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerStarted","Data":"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659"} Nov 25 12:47:08 crc kubenswrapper[4675]: I1125 12:47:08.827306 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 12:47:09 crc kubenswrapper[4675]: I1125 12:47:09.924023 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:10 crc kubenswrapper[4675]: I1125 12:47:10.966007 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:10 crc kubenswrapper[4675]: I1125 12:47:10.966503 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:11 crc kubenswrapper[4675]: I1125 12:47:11.969550 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7ff8694d7d-s9pzw" podUID="51325415-d3b2-4852-bdce-6861cd1dc391" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 12:47:11 crc kubenswrapper[4675]: I1125 12:47:11.969666 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.120581 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.122074 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="cinder-scheduler" 
probeResult="failure" output="Get \"http://10.217.0.163:8080/\": dial tcp 10.217.0.163:8080: connect: connection refused" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.317977 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.339530 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=6.231550733 podStartE2EDuration="12.339513278s" podCreationTimestamp="2025-11-25 12:47:00 +0000 UTC" firstStartedPulling="2025-11-25 12:47:01.509987194 +0000 UTC m=+1166.681579535" lastFinishedPulling="2025-11-25 12:47:07.617949739 +0000 UTC m=+1172.789542080" observedRunningTime="2025-11-25 12:47:08.864805542 +0000 UTC m=+1174.036397893" watchObservedRunningTime="2025-11-25 12:47:12.339513278 +0000 UTC m=+1177.511105619" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.376881 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.377108 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="dnsmasq-dns" containerID="cri-o://77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408" gracePeriod=10 Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.439943 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.600413 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7ff8694d7d-s9pzw" podUID="51325415-d3b2-4852-bdce-6861cd1dc391" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.654309 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7ff8694d7d-s9pzw" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.750107 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.751127 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" containerID="cri-o://56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1" gracePeriod=30 Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.751918 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" containerID="cri-o://77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521" gracePeriod=30 Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.765451 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": EOF" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.766470 4675 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": EOF" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.772619 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": EOF" Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.903767 4675 generic.go:334] "Generic (PLEG): container finished" podID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerID="77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408" exitCode=0 Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.904003 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" event={"ID":"dc02c331-c8e4-4c4a-864f-54fe5f391fad","Type":"ContainerDied","Data":"77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408"} Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.905901 4675 generic.go:334] "Generic (PLEG): container finished" podID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerID="56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1" exitCode=143 Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.906019 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerDied","Data":"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1"} Nov 25 12:47:12 crc kubenswrapper[4675]: I1125 12:47:12.911705 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.142:5353: connect: connection refused" Nov 25 12:47:12 crc kubenswrapper[4675]: E1125 12:47:12.975386 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc02c331_c8e4_4c4a_864f_54fe5f391fad.slice/crio-conmon-77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0409088_0aa9_46b9_9685_8bb61c1f0557.slice/crio-56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1.scope\": RecentStats: unable to find data in memory cache]" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.099732 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.595030 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.595282 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bf4848886-8rwx5" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.755581 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821664 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmbbf\" (UniqueName: \"kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821755 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821780 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821907 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821950 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.821984 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb\") pod \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\" (UID: \"dc02c331-c8e4-4c4a-864f-54fe5f391fad\") " Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.855799 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf" (OuterVolumeSpecName: "kube-api-access-rmbbf") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "kube-api-access-rmbbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.904384 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.948799 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.948841 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmbbf\" (UniqueName: \"kubernetes.io/projected/dc02c331-c8e4-4c4a-864f-54fe5f391fad-kube-api-access-rmbbf\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.953150 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.955317 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" event={"ID":"dc02c331-c8e4-4c4a-864f-54fe5f391fad","Type":"ContainerDied","Data":"1158e720ae0b8a45fec93ed17617cd8da1d509981abe220aae4cce3766036133"} Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.955360 4675 scope.go:117] "RemoveContainer" containerID="77c3d7b0b167f0d69ce157923852fa0c1a656614e6435eea61b271c051b23408" Nov 25 12:47:13 crc kubenswrapper[4675]: I1125 12:47:13.955475 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cl8c2" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.058167 4675 scope.go:117] "RemoveContainer" containerID="695bd9662390afb23583ebf9078df6151b906d2810a6acc606adb171c3e7d6df" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.060304 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.171292 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.176991 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.210930 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.239368 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config" (OuterVolumeSpecName: "config") pod "dc02c331-c8e4-4c4a-864f-54fe5f391fad" (UID: "dc02c331-c8e4-4c4a-864f-54fe5f391fad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.278364 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.278392 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc02c331-c8e4-4c4a-864f-54fe5f391fad-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.313933 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:47:14 crc kubenswrapper[4675]: I1125 12:47:14.328297 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cl8c2"] Nov 25 12:47:15 crc kubenswrapper[4675]: I1125 12:47:15.548537 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" path="/var/lib/kubelet/pods/dc02c331-c8e4-4c4a-864f-54fe5f391fad/volumes" Nov 25 12:47:16 crc kubenswrapper[4675]: I1125 12:47:16.975013 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7ff8694d7d-s9pzw" podUID="51325415-d3b2-4852-bdce-6861cd1dc391" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.376372 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.436483 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.513408 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.808039 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.839313 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-686987849d-794s5" Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.989242 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="cinder-scheduler" containerID="cri-o://74e74f15d5046874ac521396d9948db9352772d544f80542597d46b844702d8e" gracePeriod=30 Nov 25 12:47:17 crc kubenswrapper[4675]: I1125 12:47:17.989889 4675 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/cinder-scheduler-0" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="probe" containerID="cri-o://a5f352c258bf31524bdb9559a6e82952fcdcc96d22f9f1a044cd414198f255e2" gracePeriod=30 Nov 25 12:47:18 crc kubenswrapper[4675]: I1125 12:47:18.997789 4675 generic.go:334] "Generic (PLEG): container finished" podID="c05847eb-7376-4c25-96e6-9218fa514493" containerID="0b663ec7c4f5e1fe8005bb05b1f4b514f633f609b2e481aa9cc021138088547d" exitCode=0 Nov 25 12:47:18 crc kubenswrapper[4675]: I1125 12:47:18.997855 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqss" event={"ID":"c05847eb-7376-4c25-96e6-9218fa514493","Type":"ContainerDied","Data":"0b663ec7c4f5e1fe8005bb05b1f4b514f633f609b2e481aa9cc021138088547d"} Nov 25 12:47:19 crc kubenswrapper[4675]: I1125 12:47:19.000028 4675 generic.go:334] "Generic (PLEG): container finished" podID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerID="a5f352c258bf31524bdb9559a6e82952fcdcc96d22f9f1a044cd414198f255e2" exitCode=0 Nov 25 12:47:19 crc kubenswrapper[4675]: I1125 12:47:19.000074 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerDied","Data":"a5f352c258bf31524bdb9559a6e82952fcdcc96d22f9f1a044cd414198f255e2"} Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.022858 4675 generic.go:334] "Generic (PLEG): container finished" podID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerID="74e74f15d5046874ac521396d9948db9352772d544f80542597d46b844702d8e" exitCode=0 Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.022914 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerDied","Data":"74e74f15d5046874ac521396d9948db9352772d544f80542597d46b844702d8e"} Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.218502 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.292861 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": read tcp 10.217.0.2:46682->10.217.0.160:9311: read: connection reset by peer" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.292874 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74b4cf8674-4p2tv" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": read tcp 10.217.0.2:46672->10.217.0.160:9311: read: connection reset by peer" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.387874 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.387988 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.388025 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.388061 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.388099 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.388144 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccdx\" (UniqueName: \"kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx\") pod \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\" (UID: \"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.401223 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.404335 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.421070 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx" (OuterVolumeSpecName: "kube-api-access-6ccdx") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "kube-api-access-6ccdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.437282 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts" (OuterVolumeSpecName: "scripts") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.490724 4675 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.490767 4675 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.490777 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.490787 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccdx\" (UniqueName: \"kubernetes.io/projected/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-kube-api-access-6ccdx\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.523991 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqss" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.529641 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.551187 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data" (OuterVolumeSpecName: "config-data") pod "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" (UID: "786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.592301 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.592352 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.693354 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config\") pod \"c05847eb-7376-4c25-96e6-9218fa514493\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.693852 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l76j6\" (UniqueName: \"kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6\") pod \"c05847eb-7376-4c25-96e6-9218fa514493\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.694017 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle\") pod \"c05847eb-7376-4c25-96e6-9218fa514493\" (UID: \"c05847eb-7376-4c25-96e6-9218fa514493\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.713404 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6" (OuterVolumeSpecName: "kube-api-access-l76j6") pod "c05847eb-7376-4c25-96e6-9218fa514493" (UID: "c05847eb-7376-4c25-96e6-9218fa514493"). InnerVolumeSpecName "kube-api-access-l76j6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.738197 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config" (OuterVolumeSpecName: "config") pod "c05847eb-7376-4c25-96e6-9218fa514493" (UID: "c05847eb-7376-4c25-96e6-9218fa514493"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.743166 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c05847eb-7376-4c25-96e6-9218fa514493" (UID: "c05847eb-7376-4c25-96e6-9218fa514493"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.775121 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.795685 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.795719 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l76j6\" (UniqueName: \"kubernetes.io/projected/c05847eb-7376-4c25-96e6-9218fa514493-kube-api-access-l76j6\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.795730 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c05847eb-7376-4c25-96e6-9218fa514493-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.896720 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom\") pod \"c0409088-0aa9-46b9-9685-8bb61c1f0557\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.896914 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle\") pod \"c0409088-0aa9-46b9-9685-8bb61c1f0557\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.896941 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data\") pod \"c0409088-0aa9-46b9-9685-8bb61c1f0557\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.896972 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpmhf\" (UniqueName: \"kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf\") pod \"c0409088-0aa9-46b9-9685-8bb61c1f0557\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.897049 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs\") pod \"c0409088-0aa9-46b9-9685-8bb61c1f0557\" (UID: \"c0409088-0aa9-46b9-9685-8bb61c1f0557\") " Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.898017 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs" (OuterVolumeSpecName: "logs") pod "c0409088-0aa9-46b9-9685-8bb61c1f0557" (UID: "c0409088-0aa9-46b9-9685-8bb61c1f0557"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.904536 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c0409088-0aa9-46b9-9685-8bb61c1f0557" (UID: "c0409088-0aa9-46b9-9685-8bb61c1f0557"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.904716 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf" (OuterVolumeSpecName: "kube-api-access-rpmhf") pod "c0409088-0aa9-46b9-9685-8bb61c1f0557" (UID: "c0409088-0aa9-46b9-9685-8bb61c1f0557"). InnerVolumeSpecName "kube-api-access-rpmhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.933146 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0409088-0aa9-46b9-9685-8bb61c1f0557" (UID: "c0409088-0aa9-46b9-9685-8bb61c1f0557"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.945641 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data" (OuterVolumeSpecName: "config-data") pod "c0409088-0aa9-46b9-9685-8bb61c1f0557" (UID: "c0409088-0aa9-46b9-9685-8bb61c1f0557"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.999576 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0409088-0aa9-46b9-9685-8bb61c1f0557-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.999607 4675 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.999617 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.999626 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0409088-0aa9-46b9-9685-8bb61c1f0557-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:20 crc kubenswrapper[4675]: I1125 12:47:20.999636 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpmhf\" (UniqueName: \"kubernetes.io/projected/c0409088-0aa9-46b9-9685-8bb61c1f0557-kube-api-access-rpmhf\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.032607 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqss" event={"ID":"c05847eb-7376-4c25-96e6-9218fa514493","Type":"ContainerDied","Data":"5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd"} Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.032653 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f8d8be246a3fc2897848e746de73342223c6bb8bdc859ae2c1006d0ada0cffd" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.032624 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-rhqss" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.034571 4675 generic.go:334] "Generic (PLEG): container finished" podID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerID="77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521" exitCode=0 Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.034628 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74b4cf8674-4p2tv" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.034632 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerDied","Data":"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521"} Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.034654 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74b4cf8674-4p2tv" event={"ID":"c0409088-0aa9-46b9-9685-8bb61c1f0557","Type":"ContainerDied","Data":"df9cd2a04d443ec88bc3867de4eda5cd567d71c2a167892cadaa9f4d1a51e098"} Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.034675 4675 scope.go:117] "RemoveContainer" containerID="77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.037311 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2","Type":"ContainerDied","Data":"2ad31b6e5c2df930c610d59e2cec704176713716c90cd4c95575361e7b53304d"} Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.037388 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.069013 4675 scope.go:117] "RemoveContainer" containerID="56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.198921 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.223236 4675 scope.go:117] "RemoveContainer" containerID="77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.225239 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521\": container with ID starting with 77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521 not found: ID does not exist" containerID="77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.225275 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521"} err="failed to get container status \"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521\": rpc error: code = NotFound desc = could not find container \"77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521\": container with ID starting with 77de0a15e9d7fd3e72be0c374596949904121acd74770e54a19dbcfbb6763521 not found: ID does not exist" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.225301 4675 scope.go:117] "RemoveContainer" containerID="56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1" Nov 25 
12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.225527 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1\": container with ID starting with 56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1 not found: ID does not exist" containerID="56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.225548 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1"} err="failed to get container status \"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1\": rpc error: code = NotFound desc = could not find container \"56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1\": container with ID starting with 56d34c717372213e66c1496373fe933f295c3632efda67224ded308ca30b72f1 not found: ID does not exist" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.225628 4675 scope.go:117] "RemoveContainer" containerID="a5f352c258bf31524bdb9559a6e82952fcdcc96d22f9f1a044cd414198f255e2" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.241179 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.278417 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.278934 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="init" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.278950 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="init" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.278965 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.278972 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.278990 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="cinder-scheduler" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.278999 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="cinder-scheduler" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279021 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c05847eb-7376-4c25-96e6-9218fa514493" containerName="neutron-db-sync" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279028 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c05847eb-7376-4c25-96e6-9218fa514493" containerName="neutron-db-sync" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279045 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="init" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279052 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="init" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279067 4675 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279074 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279088 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="probe" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279095 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="probe" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279104 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279112 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" Nov 25 12:47:21 crc kubenswrapper[4675]: E1125 12:47:21.279128 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279135 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279780 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c05847eb-7376-4c25-96e6-9218fa514493" containerName="neutron-db-sync" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279805 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="cinder-scheduler" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279834 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api-log" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279844 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b2bb9c7-3d3a-4d45-ad42-2f422dea1c40" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279863 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc02c331-c8e4-4c4a-864f-54fe5f391fad" containerName="dnsmasq-dns" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279876 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" containerName="probe" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.279887 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" containerName="barbican-api" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.281057 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.294927 4675 scope.go:117] "RemoveContainer" containerID="74e74f15d5046874ac521396d9948db9352772d544f80542597d46b844702d8e" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.324769 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.326843 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.412566 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-74b4cf8674-4p2tv"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.456261 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.463802 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb8fs\" (UniqueName: \"kubernetes.io/projected/46c62718-04bb-45f3-bd9c-08957f1241a7-kube-api-access-tb8fs\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.463924 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.463965 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46c62718-04bb-45f3-bd9c-08957f1241a7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.465047 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.465117 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.465164 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-scripts\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.465992 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.468157 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.483953 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.508901 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.510643 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.518253 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.518524 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-b6rw9" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.518771 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.518955 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.527390 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566389 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb8fs\" (UniqueName: \"kubernetes.io/projected/46c62718-04bb-45f3-bd9c-08957f1241a7-kube-api-access-tb8fs\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566433 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566457 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46c62718-04bb-45f3-bd9c-08957f1241a7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566491 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566531 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.566550 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-scripts\") pod \"cinder-scheduler-0\" (UID: 
\"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.568082 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46c62718-04bb-45f3-bd9c-08957f1241a7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.573592 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2" path="/var/lib/kubelet/pods/786c20ee-e7fa-4505-86ec-1c5d8cb4dcd2/volumes" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.574510 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0409088-0aa9-46b9-9685-8bb61c1f0557" path="/var/lib/kubelet/pods/c0409088-0aa9-46b9-9685-8bb61c1f0557/volumes" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.578315 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.584665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.585199 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.587731 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb8fs\" (UniqueName: \"kubernetes.io/projected/46c62718-04bb-45f3-bd9c-08957f1241a7-kube-api-access-tb8fs\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.612380 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c62718-04bb-45f3-bd9c-08957f1241a7-scripts\") pod \"cinder-scheduler-0\" (UID: \"46c62718-04bb-45f3-bd9c-08957f1241a7\") " pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.613203 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669787 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669850 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669869 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz8sq\" (UniqueName: \"kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669893 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669963 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.669993 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.670036 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdv2r\" (UniqueName: \"kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.670056 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.670075 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc\") 
pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.670117 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.670133 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.771794 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772161 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772198 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772221 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz8sq\" (UniqueName: \"kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772254 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772318 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772359 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: 
\"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772413 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdv2r\" (UniqueName: \"kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772442 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772469 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.772518 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.773166 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.773350 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.775127 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.776136 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.776461 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: 
I1125 12:47:21.781799 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.781880 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.803013 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz8sq\" (UniqueName: \"kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.804462 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.806503 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle\") pod \"neutron-b6974fd78-dvq24\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.808202 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdv2r\" (UniqueName: \"kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r\") pod \"dnsmasq-dns-5c9776ccc5-rfj6r\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") " pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.811350 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.847542 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.922561 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.924015 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.929121 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-z7zkg" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.929335 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.929762 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 12:47:21 crc kubenswrapper[4675]: I1125 12:47:21.960121 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.081942 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config-secret\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.082057 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m97m8\" (UniqueName: \"kubernetes.io/projected/4305dd31-2399-4e02-8b99-224a616e8c8c-kube-api-access-m97m8\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.082087 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.082150 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.183857 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m97m8\" (UniqueName: \"kubernetes.io/projected/4305dd31-2399-4e02-8b99-224a616e8c8c-kube-api-access-m97m8\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.183905 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.183937 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.184011 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config-secret\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.188938 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.193708 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-openstack-config-secret\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.211462 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m97m8\" (UniqueName: \"kubernetes.io/projected/4305dd31-2399-4e02-8b99-224a616e8c8c-kube-api-access-m97m8\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.221132 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4305dd31-2399-4e02-8b99-224a616e8c8c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4305dd31-2399-4e02-8b99-224a616e8c8c\") " pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.282531 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.304840 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.595756 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"] Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.901566 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:47:22 crc kubenswrapper[4675]: I1125 12:47:22.972866 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.133781 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerStarted","Data":"ca9d7f40f5ba143b2a937994a579a67f70b2aa334a0a719c217e36d1ad44159f"} Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.146040 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"46c62718-04bb-45f3-bd9c-08957f1241a7","Type":"ContainerStarted","Data":"2db6f3980e4b2a6708a8057ef2872b1c860d6e0c3f028abae15096949ab26535"} Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.160833 4675 generic.go:334] "Generic (PLEG): container finished" podID="12d2f6df-259a-49af-a8a7-df66608bd255" containerID="540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c" exitCode=0 Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.160898 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" event={"ID":"12d2f6df-259a-49af-a8a7-df66608bd255","Type":"ContainerDied","Data":"540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c"} Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.160924 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" event={"ID":"12d2f6df-259a-49af-a8a7-df66608bd255","Type":"ContainerStarted","Data":"67995dbc1ce6f4f0a25bdf36ac726793fd68ff887269a3f9f406f1b12cab3a7f"} Nov 25 12:47:23 crc kubenswrapper[4675]: I1125 12:47:23.165431 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"4305dd31-2399-4e02-8b99-224a616e8c8c","Type":"ContainerStarted","Data":"5e93c4935ebd7a97776854519cb08d3495f16c7c6dd6c71cc2c544802116db07"} Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.182317 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerStarted","Data":"f68f77ca3171efb6bccb9c6d2429be22544ed61bfef4ac947736206b86a5bc2e"} Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.182915 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerStarted","Data":"4186bdd8b758dbd20870a379874e6216cdc010bf1887419d78508f955760ae64"} Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.183037 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.188573 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"46c62718-04bb-45f3-bd9c-08957f1241a7","Type":"ContainerStarted","Data":"d58f55e3c395c279cfea92b1f50392f9c0a31acc35d617338ba70ca3858931eb"} Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.202288 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" event={"ID":"12d2f6df-259a-49af-a8a7-df66608bd255","Type":"ContainerStarted","Data":"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30"} Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.202742 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.214172 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-b6974fd78-dvq24" podStartSLOduration=3.214154844 podStartE2EDuration="3.214154844s" podCreationTimestamp="2025-11-25 12:47:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:24.20972373 +0000 UTC m=+1189.381316071" watchObservedRunningTime="2025-11-25 12:47:24.214154844 +0000 UTC m=+1189.385747185" Nov 25 12:47:24 crc kubenswrapper[4675]: I1125 12:47:24.240728 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" podStartSLOduration=3.240708636 podStartE2EDuration="3.240708636s" podCreationTimestamp="2025-11-25 12:47:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:24.237974847 +0000 UTC m=+1189.409567198" watchObservedRunningTime="2025-11-25 12:47:24.240708636 +0000 UTC m=+1189.412300987" Nov 25 12:47:25 crc kubenswrapper[4675]: I1125 12:47:25.214568 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"46c62718-04bb-45f3-bd9c-08957f1241a7","Type":"ContainerStarted","Data":"af32396f35d88c0132f496f76f3e487b4191202a6441f8ef8616d0cd9c06222f"} Nov 25 12:47:25 crc kubenswrapper[4675]: I1125 12:47:25.246190 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.24617319 podStartE2EDuration="4.24617319s" podCreationTimestamp="2025-11-25 12:47:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:25.239197324 +0000 UTC m=+1190.410789665" watchObservedRunningTime="2025-11-25 12:47:25.24617319 +0000 UTC m=+1190.417765531" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.168629 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6c6c8d8969-kqpxz"] Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.177547 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.188299 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.188621 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208619 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-internal-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208701 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-public-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208749 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-httpd-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208793 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-combined-ca-bundle\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208876 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208911 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-ovndb-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.208934 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqgjt\" (UniqueName: \"kubernetes.io/projected/a69ab647-c53a-4fcc-86f0-d92a9eebf587-kube-api-access-vqgjt\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.210612 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c6c8d8969-kqpxz"] Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310616 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-internal-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310675 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-public-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310724 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-httpd-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310789 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-combined-ca-bundle\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310850 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310903 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-ovndb-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.310926 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqgjt\" (UniqueName: \"kubernetes.io/projected/a69ab647-c53a-4fcc-86f0-d92a9eebf587-kube-api-access-vqgjt\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.322659 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-ovndb-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.325971 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-internal-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.336751 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-public-tls-certs\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: 
\"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.339578 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-combined-ca-bundle\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.341956 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.344733 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqgjt\" (UniqueName: \"kubernetes.io/projected/a69ab647-c53a-4fcc-86f0-d92a9eebf587-kube-api-access-vqgjt\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.358863 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a69ab647-c53a-4fcc-86f0-d92a9eebf587-httpd-config\") pod \"neutron-6c6c8d8969-kqpxz\" (UID: \"a69ab647-c53a-4fcc-86f0-d92a9eebf587\") " pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.508899 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:26 crc kubenswrapper[4675]: I1125 12:47:26.613598 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 12:47:27 crc kubenswrapper[4675]: I1125 12:47:27.346677 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c6c8d8969-kqpxz"] Nov 25 12:47:27 crc kubenswrapper[4675]: I1125 12:47:27.805995 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.165:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.265302 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c6c8d8969-kqpxz" event={"ID":"a69ab647-c53a-4fcc-86f0-d92a9eebf587","Type":"ContainerStarted","Data":"2f10400998e36e800d15ace355f2c2f98e83986405aa217bd8094f7308362d9c"} Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.265363 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c6c8d8969-kqpxz" event={"ID":"a69ab647-c53a-4fcc-86f0-d92a9eebf587","Type":"ContainerStarted","Data":"f3f59dc340d98f9afa3f1f98d8465e33ad4773065ad2a0b19ba65f960d880978"} Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.265372 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c6c8d8969-kqpxz" event={"ID":"a69ab647-c53a-4fcc-86f0-d92a9eebf587","Type":"ContainerStarted","Data":"07c2c55c4629344c76388b0948464376143e5e75735d7e283d468b4b42c16f8c"} Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.266118 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.289881 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6c6c8d8969-kqpxz" podStartSLOduration=2.289860488 podStartE2EDuration="2.289860488s" podCreationTimestamp="2025-11-25 12:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:28.28592542 +0000 UTC m=+1193.457517761" watchObservedRunningTime="2025-11-25 12:47:28.289860488 +0000 UTC m=+1193.461452829" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.554245 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.554863 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-central-agent" containerID="cri-o://de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604" gracePeriod=30 Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.554933 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" containerID="cri-o://59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659" gracePeriod=30 Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.554999 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-notification-agent" containerID="cri-o://671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa" gracePeriod=30 Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.555009 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="sg-core" containerID="cri-o://2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708" gracePeriod=30 Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.592367 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.162:3000/\": EOF" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.927551 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-675f685b59-np48s"] Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.929585 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.931462 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.933618 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.935545 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.949142 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-675f685b59-np48s"] Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978804 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-log-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978868 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkbqq\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-kube-api-access-hkbqq\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978921 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-etc-swift\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978943 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-run-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978965 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-public-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.978982 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-combined-ca-bundle\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.979036 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-config-data\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " 
pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:28 crc kubenswrapper[4675]: I1125 12:47:28.979052 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-internal-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087276 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-log-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087691 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkbqq\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-kube-api-access-hkbqq\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087743 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-etc-swift\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087785 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-run-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087836 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-public-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.087865 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-combined-ca-bundle\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.088001 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-config-data\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.088025 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-internal-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " 
pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.089683 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-run-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.093729 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-combined-ca-bundle\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.097474 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-public-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.100119 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/08133520-c4c6-4b59-b426-d18290e4195a-log-httpd\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.100702 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-internal-tls-certs\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.106258 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-etc-swift\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.109556 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08133520-c4c6-4b59-b426-d18290e4195a-config-data\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.120722 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkbqq\" (UniqueName: \"kubernetes.io/projected/08133520-c4c6-4b59-b426-d18290e4195a-kube-api-access-hkbqq\") pod \"swift-proxy-675f685b59-np48s\" (UID: \"08133520-c4c6-4b59-b426-d18290e4195a\") " pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.250255 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.289763 4675 generic.go:334] "Generic (PLEG): container finished" podID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerID="59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659" exitCode=0 Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.289796 4675 generic.go:334] "Generic (PLEG): container finished" podID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerID="2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708" exitCode=2 Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.290874 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerDied","Data":"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659"} Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.290906 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerDied","Data":"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708"} Nov 25 12:47:29 crc kubenswrapper[4675]: I1125 12:47:29.951000 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-675f685b59-np48s"] Nov 25 12:47:30 crc kubenswrapper[4675]: I1125 12:47:30.304389 4675 generic.go:334] "Generic (PLEG): container finished" podID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerID="de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604" exitCode=0 Nov 25 12:47:30 crc kubenswrapper[4675]: I1125 12:47:30.304505 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerDied","Data":"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604"} Nov 25 12:47:30 crc kubenswrapper[4675]: I1125 12:47:30.306608 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-675f685b59-np48s" event={"ID":"08133520-c4c6-4b59-b426-d18290e4195a","Type":"ContainerStarted","Data":"3b1730cb84a03afcb05dc279e82a0956b47726b78c656d641c7b748773f16170"} Nov 25 12:47:30 crc kubenswrapper[4675]: I1125 12:47:30.945838 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.162:3000/\": dial tcp 10.217.0.162:3000: connect: connection refused" Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.337765 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-675f685b59-np48s" event={"ID":"08133520-c4c6-4b59-b426-d18290e4195a","Type":"ContainerStarted","Data":"fd12cc90cc67a5eb316226ba074f7c8698cbf5f654caa702ca75a4117cef9264"} Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.338022 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-675f685b59-np48s" event={"ID":"08133520-c4c6-4b59-b426-d18290e4195a","Type":"ContainerStarted","Data":"5d0c76a4d47e4f65f5f7e758b81609692bcb5bff257be28fdc7a6cb7e7acf43c"} Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.338494 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.338514 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-675f685b59-np48s" Nov 25 12:47:31 crc 
Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.815438 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r"
Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.944507 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"]
Nov 25 12:47:31 crc kubenswrapper[4675]: I1125 12:47:31.944778 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" containerID="cri-o://811f46e24f53511dababf0245bef9462c32547c8857e7ebb539e1455ed9de3c3" gracePeriod=10
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.150984 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.201790 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.267271 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.267348 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.267393 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6pv5\" (UniqueName: \"kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.267447 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.267481 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.268083 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.268495 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.268529 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data\") pod \"4826c966-919b-42a1-a9e4-a6c7ea65c426\" (UID: \"4826c966-919b-42a1-a9e4-a6c7ea65c426\") "
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.269086 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.278900 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts" (OuterVolumeSpecName: "scripts") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.279789 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5" (OuterVolumeSpecName: "kube-api-access-t6pv5") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "kube-api-access-t6pv5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.279936 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.317024 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: connect: connection refused"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.340471 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.370433 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6pv5\" (UniqueName: \"kubernetes.io/projected/4826c966-919b-42a1-a9e4-a6c7ea65c426-kube-api-access-t6pv5\") on node \"crc\" DevicePath \"\""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.370468 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.370477 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.370485 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4826c966-919b-42a1-a9e4-a6c7ea65c426-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.390859 4675 generic.go:334] "Generic (PLEG): container finished" podID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerID="671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa" exitCode=0
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.390934 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerDied","Data":"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa"}
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.390961 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4826c966-919b-42a1-a9e4-a6c7ea65c426","Type":"ContainerDied","Data":"0b71a15353ccb501129618df52f2833ba034333805d002dd4d92840c4e7e05cb"}
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.390978 4675 scope.go:117] "RemoveContainer" containerID="59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.391229 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.414000 4675 generic.go:334] "Generic (PLEG): container finished" podID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerID="7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773" exitCode=137
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.414250 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerDied","Data":"7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773"}
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.414357 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerStarted","Data":"5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f"}
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.436436 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.442934 4675 generic.go:334] "Generic (PLEG): container finished" podID="412d2040-4c83-4443-989e-cc844466e840" containerID="b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f" exitCode=137 Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.442965 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerDied","Data":"b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f"} Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.454602 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerStarted","Data":"a8a62130160dd33e971232bb74e27eaa3176dab69a60bc1e0084e518a0762ce4"} Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.476043 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.497507 4675 generic.go:334] "Generic (PLEG): container finished" podID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerID="811f46e24f53511dababf0245bef9462c32547c8857e7ebb539e1455ed9de3c3" exitCode=0 Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.497978 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" event={"ID":"15f85be0-29b9-44e3-a88e-d1531192fbe0","Type":"ContainerDied","Data":"811f46e24f53511dababf0245bef9462c32547c8857e7ebb539e1455ed9de3c3"} Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.536273 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data" (OuterVolumeSpecName: "config-data") pod "4826c966-919b-42a1-a9e4-a6c7ea65c426" (UID: "4826c966-919b-42a1-a9e4-a6c7ea65c426"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.580990 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826c966-919b-42a1-a9e4-a6c7ea65c426-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.728359 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.739672 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.761368 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:32 crc kubenswrapper[4675]: E1125 12:47:32.761794 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-central-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.761829 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-central-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: E1125 12:47:32.761844 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-notification-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.761850 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-notification-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: E1125 12:47:32.761881 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="sg-core" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.761887 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="sg-core" Nov 25 12:47:32 crc kubenswrapper[4675]: E1125 12:47:32.761900 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.761905 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.762086 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-central-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.762102 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="proxy-httpd" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.762122 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="sg-core" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.762132 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" containerName="ceilometer-notification-agent" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.763657 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.768020 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.768727 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.804038 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.849054 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.165:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890011 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890099 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890140 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890443 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890573 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spxdl\" (UniqueName: \"kubernetes.io/projected/631df7ea-47d6-481f-951a-5a2450cf7eb0-kube-api-access-spxdl\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890659 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.890728 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0" Nov 25 12:47:32 crc kubenswrapper[4675]: 
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992351 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992377 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992407 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992440 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992465 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.992531 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.993481 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.993619 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.999110 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:32 crc kubenswrapper[4675]: I1125 12:47:32.999449 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:33 crc kubenswrapper[4675]: I1125 12:47:33.002013 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:33 crc kubenswrapper[4675]: I1125 12:47:33.016728 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:33 crc kubenswrapper[4675]: I1125 12:47:33.027452 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spxdl\" (UniqueName: \"kubernetes.io/projected/631df7ea-47d6-481f-951a-5a2450cf7eb0-kube-api-access-spxdl\") pod \"ceilometer-0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " pod="openstack/ceilometer-0"
Nov 25 12:47:33 crc kubenswrapper[4675]: I1125 12:47:33.084488 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:47:33 crc kubenswrapper[4675]: I1125 12:47:33.543319 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4826c966-919b-42a1-a9e4-a6c7ea65c426" path="/var/lib/kubelet/pods/4826c966-919b-42a1-a9e4-a6c7ea65c426/volumes"
Nov 25 12:47:35 crc kubenswrapper[4675]: I1125 12:47:35.305607 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-675f685b59-np48s" podUID="08133520-c4c6-4b59-b426-d18290e4195a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 12:47:37 crc kubenswrapper[4675]: I1125 12:47:37.605958 4675 generic.go:334] "Generic (PLEG): container finished" podID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerID="d6d2367b214b5ab769e48945098118f6bd2450ed6b208711463b4b01b2f6f44c" exitCode=137
Nov 25 12:47:37 crc kubenswrapper[4675]: I1125 12:47:37.606140 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerDied","Data":"d6d2367b214b5ab769e48945098118f6bd2450ed6b208711463b4b01b2f6f44c"}
Nov 25 12:47:37 crc kubenswrapper[4675]: I1125 12:47:37.763666 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.165:8776/healthcheck\": dial tcp 10.217.0.165:8776: connect: connection refused"
Nov 25 12:47:39 crc kubenswrapper[4675]: I1125 12:47:39.165894 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:47:39 crc kubenswrapper[4675]: I1125 12:47:39.263768 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-675f685b59-np48s"
Nov 25 12:47:39 crc kubenswrapper[4675]: I1125 12:47:39.265383 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-675f685b59-np48s"
Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.253055 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6df5497f4d-4g9tv"
(probe)" probe="readiness" status="" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.254162 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.255527 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.375754 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.376149 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:47:41 crc kubenswrapper[4675]: I1125 12:47:41.379607 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:47:42 crc kubenswrapper[4675]: I1125 12:47:42.316068 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: i/o timeout" Nov 25 12:47:42 crc kubenswrapper[4675]: I1125 12:47:42.592182 4675 scope.go:117] "RemoveContainer" containerID="334ac133590cc971093eabc38fc0eff8bc350c02a3c742c2f3338da96a242025" Nov 25 12:47:42 crc kubenswrapper[4675]: I1125 12:47:42.763628 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.165:8776/healthcheck\": dial tcp 10.217.0.165:8776: connect: connection refused" Nov 25 12:47:42 crc kubenswrapper[4675]: I1125 12:47:42.976246 4675 scope.go:117] "RemoveContainer" containerID="2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.060011 4675 scope.go:117] "RemoveContainer" containerID="649637c5d8caee75d783dc9284d928ffd7ceb952959ae546d3d8496f3246629d" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.078690 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.078903 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n568h5f5h599h66h5fchc6hddh6ch5b5hf7hc4h5d5h65chcbh5c9hffh66bh7h59h669h697h549h5d9h658hfbh57dh549h5d9h7fh5bdh568h88q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m97m8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(4305dd31-2399-4e02-8b99-224a616e8c8c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.086343 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="4305dd31-2399-4e02-8b99-224a616e8c8c" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.182423 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.220942 4675 scope.go:117] "RemoveContainer" containerID="671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.298539 4675 scope.go:117] "RemoveContainer" containerID="de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.299766 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.299888 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.299971 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5ccd\" (UniqueName: \"kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.300000 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.300018 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.300060 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config\") pod \"15f85be0-29b9-44e3-a88e-d1531192fbe0\" (UID: \"15f85be0-29b9-44e3-a88e-d1531192fbe0\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.318950 4675 scope.go:117] "RemoveContainer" containerID="58d6171b528d54d6fd4e0fa0df234d2391eb0b9802c8f8c733a4a5be78925fb9" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.323173 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd" (OuterVolumeSpecName: "kube-api-access-t5ccd") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "kube-api-access-t5ccd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.402447 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5ccd\" (UniqueName: \"kubernetes.io/projected/15f85be0-29b9-44e3-a88e-d1531192fbe0-kube-api-access-t5ccd\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.412241 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config" (OuterVolumeSpecName: "config") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.421040 4675 scope.go:117] "RemoveContainer" containerID="59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.441697 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.442611 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659\": container with ID starting with 59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659 not found: ID does not exist" containerID="59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.442638 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659"} err="failed to get container status \"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659\": rpc error: code = NotFound desc = could not find container \"59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659\": container with ID starting with 59832ab2c9c1936c2a2f14c8919fc9dc1f940c5c0a03658a85d4593b2f375659 not found: ID does not exist" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.442660 4675 scope.go:117] "RemoveContainer" containerID="2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.451212 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708\": container with ID starting with 2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708 not found: ID does not exist" containerID="2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.451239 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708"} err="failed to get container status \"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708\": rpc error: code = NotFound desc = could not find container \"2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708\": container 
with ID starting with 2870979bea140b46cfd99ae21a8acbe704668e51bd4012641201775bbd82f708 not found: ID does not exist" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.451261 4675 scope.go:117] "RemoveContainer" containerID="671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.453623 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa\": container with ID starting with 671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa not found: ID does not exist" containerID="671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.453644 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa"} err="failed to get container status \"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa\": rpc error: code = NotFound desc = could not find container \"671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa\": container with ID starting with 671f16c965b55cde124b57524d6ec3a4aaa4549aa6989ce25f8965c4f10ec8fa not found: ID does not exist" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.453679 4675 scope.go:117] "RemoveContainer" containerID="de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.453915 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604\": container with ID starting with de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604 not found: ID does not exist" containerID="de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.453947 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604"} err="failed to get container status \"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604\": rpc error: code = NotFound desc = could not find container \"de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604\": container with ID starting with de4f146f31bfd075eb3eebf6bd17bbc1255d126034bea48f8b7835a237918604 not found: ID does not exist" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.484479 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.491135 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.505478 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.505506 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.505908 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.505923 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.506919 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.509476 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "15f85be0-29b9-44e3-a88e-d1531192fbe0" (UID: "15f85be0-29b9-44e3-a88e-d1531192fbe0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607581 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607636 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607699 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607744 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607766 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607793 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.607841 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-472tn\" (UniqueName: \"kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn\") pod \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\" (UID: \"ec74e66d-2eb7-4938-9e9e-1f19010c880b\") " Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.608277 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15f85be0-29b9-44e3-a88e-d1531192fbe0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.609756 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.609791 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs" (OuterVolumeSpecName: "logs") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.616922 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn" (OuterVolumeSpecName: "kube-api-access-472tn") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "kube-api-access-472tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.620071 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.627353 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts" (OuterVolumeSpecName: "scripts") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.676612 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.685946 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.686233 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ec74e66d-2eb7-4938-9e9e-1f19010c880b","Type":"ContainerDied","Data":"e9106f991db93c7d7b0a0518fd1a7c7fb0877063988de238b29e0a8a33b92024"} Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.686291 4675 scope.go:117] "RemoveContainer" containerID="d6d2367b214b5ab769e48945098118f6bd2450ed6b208711463b4b01b2f6f44c" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710354 4675 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710378 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710386 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec74e66d-2eb7-4938-9e9e-1f19010c880b-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710395 4675 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ec74e66d-2eb7-4938-9e9e-1f19010c880b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710403 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.710410 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-472tn\" (UniqueName: \"kubernetes.io/projected/ec74e66d-2eb7-4938-9e9e-1f19010c880b-kube-api-access-472tn\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.714135 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data" (OuterVolumeSpecName: "config-data") pod "ec74e66d-2eb7-4938-9e9e-1f19010c880b" (UID: "ec74e66d-2eb7-4938-9e9e-1f19010c880b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.714381 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" event={"ID":"15f85be0-29b9-44e3-a88e-d1531192fbe0","Type":"ContainerDied","Data":"be839c8375ba233e732676f96bce0e82d524ba14ec83df6489d8360d1902e6b8"} Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.714469 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" Nov 25 12:47:43 crc kubenswrapper[4675]: E1125 12:47:43.718448 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="4305dd31-2399-4e02-8b99-224a616e8c8c" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.767960 4675 scope.go:117] "RemoveContainer" containerID="37f4b32e8c1bf54ddd8242923fb38d9eb3831675fb999257d91c500a88323bad" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.768085 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"] Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.790952 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-qn76l"] Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.813132 4675 scope.go:117] "RemoveContainer" containerID="811f46e24f53511dababf0245bef9462c32547c8857e7ebb539e1455ed9de3c3" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.819350 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec74e66d-2eb7-4938-9e9e-1f19010c880b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.856582 4675 scope.go:117] "RemoveContainer" containerID="e875e1a145a79f701b8ed23360286173ac974cbd581f145cec318f86ee5e6025" Nov 25 12:47:43 crc kubenswrapper[4675]: I1125 12:47:43.884660 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.118785 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.135537 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.158008 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:44 crc kubenswrapper[4675]: E1125 12:47:44.158591 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.158653 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" Nov 25 12:47:44 crc kubenswrapper[4675]: E1125 12:47:44.158708 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api-log" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.158756 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api-log" Nov 25 12:47:44 crc kubenswrapper[4675]: E1125 12:47:44.158806 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="init" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.158871 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="init" Nov 25 12:47:44 crc kubenswrapper[4675]: E1125 12:47:44.158932 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" Nov 25 12:47:44 crc 
kubenswrapper[4675]: I1125 12:47:44.158982 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.159189 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api-log" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.159247 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.159322 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" containerName="cinder-api" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.160644 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.164553 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.164845 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.164978 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.176653 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230495 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230545 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data-custom\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230564 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230587 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-logs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230607 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230635 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230697 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5448t\" (UniqueName: \"kubernetes.io/projected/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-kube-api-access-5448t\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230715 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-scripts\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.230744 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.332926 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-logs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.332998 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333059 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333145 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5448t\" (UniqueName: \"kubernetes.io/projected/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-kube-api-access-5448t\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333167 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-scripts\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333247 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc 
kubenswrapper[4675]: I1125 12:47:44.333306 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333340 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data-custom\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.333424 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.334177 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-logs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.334232 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.344488 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data-custom\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.350413 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-config-data\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.354363 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-scripts\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.356633 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.357076 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.357658 4675 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.378459 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5448t\" (UniqueName: \"kubernetes.io/projected/90a143ed-4c09-4ac7-8dd9-869c15e9ef3c-kube-api-access-5448t\") pod \"cinder-api-0\" (UID: \"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c\") " pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.476227 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.768968 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerStarted","Data":"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815"} Nov 25 12:47:44 crc kubenswrapper[4675]: I1125 12:47:44.769498 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerStarted","Data":"5ac32562664c7beccbb68e44d17e568d31f089d966e363c2a60bc458125bf938"} Nov 25 12:47:45 crc kubenswrapper[4675]: I1125 12:47:44.999143 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 12:47:45 crc kubenswrapper[4675]: W1125 12:47:45.045157 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90a143ed_4c09_4ac7_8dd9_869c15e9ef3c.slice/crio-46cafca2e5c8fa2baf2a124f265d2d2c5f6aa7b5664c625dd9af65096ba88500 WatchSource:0}: Error finding container 46cafca2e5c8fa2baf2a124f265d2d2c5f6aa7b5664c625dd9af65096ba88500: Status 404 returned error can't find the container with id 46cafca2e5c8fa2baf2a124f265d2d2c5f6aa7b5664c625dd9af65096ba88500 Nov 25 12:47:45 crc kubenswrapper[4675]: I1125 12:47:45.547917 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" path="/var/lib/kubelet/pods/15f85be0-29b9-44e3-a88e-d1531192fbe0/volumes" Nov 25 12:47:45 crc kubenswrapper[4675]: I1125 12:47:45.548740 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec74e66d-2eb7-4938-9e9e-1f19010c880b" path="/var/lib/kubelet/pods/ec74e66d-2eb7-4938-9e9e-1f19010c880b/volumes" Nov 25 12:47:45 crc kubenswrapper[4675]: I1125 12:47:45.808070 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerStarted","Data":"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27"} Nov 25 12:47:45 crc kubenswrapper[4675]: I1125 12:47:45.817690 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c","Type":"ContainerStarted","Data":"46cafca2e5c8fa2baf2a124f265d2d2c5f6aa7b5664c625dd9af65096ba88500"} Nov 25 12:47:46 crc kubenswrapper[4675]: I1125 12:47:46.833055 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerStarted","Data":"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c"} Nov 25 12:47:46 crc kubenswrapper[4675]: I1125 
12:47:46.838444 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c","Type":"ContainerStarted","Data":"42eca7cc5f5a540977e7f629321d5b754934cf5529f28c97af5ec95a93cf49f0"} Nov 25 12:47:47 crc kubenswrapper[4675]: I1125 12:47:47.320164 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-795f4db4bc-qn76l" podUID="15f85be0-29b9-44e3-a88e-d1531192fbe0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: i/o timeout" Nov 25 12:47:47 crc kubenswrapper[4675]: I1125 12:47:47.853408 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"90a143ed-4c09-4ac7-8dd9-869c15e9ef3c","Type":"ContainerStarted","Data":"9f1a4f57f028e4bb39591ac3736cb1ac65d883d30ee2aaf4006249775baa6db3"} Nov 25 12:47:47 crc kubenswrapper[4675]: I1125 12:47:47.854912 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 12:47:47 crc kubenswrapper[4675]: I1125 12:47:47.887573 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.887552941 podStartE2EDuration="3.887552941s" podCreationTimestamp="2025-11-25 12:47:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:47:47.885145423 +0000 UTC m=+1213.056737764" watchObservedRunningTime="2025-11-25 12:47:47.887552941 +0000 UTC m=+1213.059145282" Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.868069 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerStarted","Data":"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e"} Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.868375 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-central-agent" containerID="cri-o://a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815" gracePeriod=30 Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.868445 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="sg-core" containerID="cri-o://aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c" gracePeriod=30 Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.868473 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-notification-agent" containerID="cri-o://06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27" gracePeriod=30 Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.868538 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="proxy-httpd" containerID="cri-o://89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e" gracePeriod=30 Nov 25 12:47:48 crc kubenswrapper[4675]: I1125 12:47:48.918506 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=12.905941119 podStartE2EDuration="16.918486222s" podCreationTimestamp="2025-11-25 12:47:32 
+0000 UTC" firstStartedPulling="2025-11-25 12:47:43.824255719 +0000 UTC m=+1208.995848060" lastFinishedPulling="2025-11-25 12:47:47.836800822 +0000 UTC m=+1213.008393163" observedRunningTime="2025-11-25 12:47:48.909260072 +0000 UTC m=+1214.080852423" watchObservedRunningTime="2025-11-25 12:47:48.918486222 +0000 UTC m=+1214.090078563" Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.880253 4675 generic.go:334] "Generic (PLEG): container finished" podID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerID="89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e" exitCode=0 Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.881280 4675 generic.go:334] "Generic (PLEG): container finished" podID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerID="aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c" exitCode=2 Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.881391 4675 generic.go:334] "Generic (PLEG): container finished" podID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerID="06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27" exitCode=0 Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.882483 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerDied","Data":"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e"} Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.882616 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerDied","Data":"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c"} Nov 25 12:47:49 crc kubenswrapper[4675]: I1125 12:47:49.882711 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerDied","Data":"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27"} Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.266138 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.380706 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.588129 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.637259 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.637655 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.637757 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.637857 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spxdl\" (UniqueName: \"kubernetes.io/projected/631df7ea-47d6-481f-951a-5a2450cf7eb0-kube-api-access-spxdl\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.638016 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.638095 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.638202 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data\") pod \"631df7ea-47d6-481f-951a-5a2450cf7eb0\" (UID: \"631df7ea-47d6-481f-951a-5a2450cf7eb0\") " Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.639567 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.640048 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.646261 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/631df7ea-47d6-481f-951a-5a2450cf7eb0-kube-api-access-spxdl" (OuterVolumeSpecName: "kube-api-access-spxdl") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "kube-api-access-spxdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.654839 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts" (OuterVolumeSpecName: "scripts") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.740950 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.740993 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spxdl\" (UniqueName: \"kubernetes.io/projected/631df7ea-47d6-481f-951a-5a2450cf7eb0-kube-api-access-spxdl\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.741008 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/631df7ea-47d6-481f-951a-5a2450cf7eb0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.741019 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.758696 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.778024 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.786056 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data" (OuterVolumeSpecName: "config-data") pod "631df7ea-47d6-481f-951a-5a2450cf7eb0" (UID: "631df7ea-47d6-481f-951a-5a2450cf7eb0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.842867 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.842905 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.842928 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/631df7ea-47d6-481f-951a-5a2450cf7eb0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.866113 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.902627 4675 generic.go:334] "Generic (PLEG): container finished" podID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerID="a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815" exitCode=0 Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.902692 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerDied","Data":"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815"} Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.902722 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"631df7ea-47d6-481f-951a-5a2450cf7eb0","Type":"ContainerDied","Data":"5ac32562664c7beccbb68e44d17e568d31f089d966e363c2a60bc458125bf938"} Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.902741 4675 scope.go:117] "RemoveContainer" containerID="89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.902910 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.945672 4675 scope.go:117] "RemoveContainer" containerID="aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c" Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.991428 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:51 crc kubenswrapper[4675]: I1125 12:47:51.998509 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.022513 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.023141 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="proxy-httpd" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023220 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="proxy-httpd" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.023282 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-notification-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023338 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-notification-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.023398 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-central-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023447 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-central-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.023514 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="sg-core" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023586 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="sg-core" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023902 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-central-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.023981 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="proxy-httpd" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.024035 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="ceilometer-notification-agent" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.024088 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" containerName="sg-core" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.026197 4675 scope.go:117] "RemoveContainer" containerID="06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.028948 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.045878 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.046212 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.049087 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.071104 4675 scope.go:117] "RemoveContainer" containerID="a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.141367 4675 scope.go:117] "RemoveContainer" containerID="89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.141781 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e\": container with ID starting with 89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e not found: ID does not exist" containerID="89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.141808 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e"} err="failed to get container status \"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e\": rpc error: code = NotFound desc = could not find container \"89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e\": container with ID starting with 89f0396b0b9c437347d15e90ccb404aff8fb2134c2406993d653993271e1e41e not found: ID does not exist" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.141844 4675 scope.go:117] "RemoveContainer" containerID="aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.142353 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c\": container with ID starting with aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c not found: ID does not exist" containerID="aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.142408 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c"} err="failed to get container status \"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c\": rpc error: code = NotFound desc = could not find container \"aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c\": container with ID starting with aa27041d5d0002f5755630e27c3b7161fe06cfcbd57f11146a796ba06cb8bd6c not found: ID does not exist" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.142434 4675 scope.go:117] "RemoveContainer" containerID="06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.142724 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27\": container with ID starting with 06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27 not found: ID does not exist" containerID="06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.142740 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27"} err="failed to get container status \"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27\": rpc error: code = NotFound desc = could not find container \"06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27\": container with ID starting with 06482db2cf0a56f6e9d707f97281e06ff7ded87142d8f43986389ec8f83f3e27 not found: ID does not exist" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.142771 4675 scope.go:117] "RemoveContainer" containerID="a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815" Nov 25 12:47:52 crc kubenswrapper[4675]: E1125 12:47:52.143032 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815\": container with ID starting with a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815 not found: ID does not exist" containerID="a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.143047 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815"} err="failed to get container status \"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815\": rpc error: code = NotFound desc = could not find container \"a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815\": container with ID starting with a3456cfef64138928f8e95408e3a79c7b1e83a368fd571f027215a1850fe4815 not found: ID does not exist" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150359 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150422 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150455 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150493 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " 
pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150571 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150660 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-484qf\" (UniqueName: \"kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.150691 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251768 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-484qf\" (UniqueName: \"kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251824 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251870 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251899 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251915 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251940 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.251988 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle\") pod 
\"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.253583 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.256015 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.259553 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.260458 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.273539 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.275837 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-484qf\" (UniqueName: \"kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.295489 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") " pod="openstack/ceilometer-0" Nov 25 12:47:52 crc kubenswrapper[4675]: I1125 12:47:52.379293 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:47:53 crc kubenswrapper[4675]: I1125 12:47:53.184048 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:53 crc kubenswrapper[4675]: W1125 12:47:53.184399 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod001a017a_9695_401e_8f31_637949f418cc.slice/crio-82b09628447c7408d20ff0f04e0a88c22bd8c62999c86c4e46d11ca2811bd708 WatchSource:0}: Error finding container 82b09628447c7408d20ff0f04e0a88c22bd8c62999c86c4e46d11ca2811bd708: Status 404 returned error can't find the container with id 82b09628447c7408d20ff0f04e0a88c22bd8c62999c86c4e46d11ca2811bd708 Nov 25 12:47:53 crc kubenswrapper[4675]: I1125 12:47:53.542022 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="631df7ea-47d6-481f-951a-5a2450cf7eb0" path="/var/lib/kubelet/pods/631df7ea-47d6-481f-951a-5a2450cf7eb0/volumes" Nov 25 12:47:53 crc kubenswrapper[4675]: I1125 12:47:53.937939 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerStarted","Data":"985d00e574541629b637ed52c38427d75645a909c063c4a1d4f83c6d6d75866a"} Nov 25 12:47:53 crc kubenswrapper[4675]: I1125 12:47:53.938166 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerStarted","Data":"82b09628447c7408d20ff0f04e0a88c22bd8c62999c86c4e46d11ca2811bd708"} Nov 25 12:47:54 crc kubenswrapper[4675]: I1125 12:47:54.947544 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerStarted","Data":"b7ed964feea56273e9e9886a26b18b30f2ed63a812a6609c6948c9840f15af8a"} Nov 25 12:47:55 crc kubenswrapper[4675]: I1125 12:47:55.605147 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:47:55 crc kubenswrapper[4675]: I1125 12:47:55.957290 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerStarted","Data":"c15646bfc3b38fca287b357ef4573b2188ad9e41906c9d1f5ef933fcf012875a"} Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.464807 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.465422 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-log" containerID="cri-o://9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f" gracePeriod=30 Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.465654 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-httpd" containerID="cri-o://3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe" gracePeriod=30 Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.582486 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6c6c8d8969-kqpxz" Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.671830 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.672092 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b6974fd78-dvq24" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-api" containerID="cri-o://4186bdd8b758dbd20870a379874e6216cdc010bf1887419d78508f955760ae64" gracePeriod=30 Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.672259 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b6974fd78-dvq24" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-httpd" containerID="cri-o://f68f77ca3171efb6bccb9c6d2429be22544ed61bfef4ac947736206b86a5bc2e" gracePeriod=30 Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.989637 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerStarted","Data":"00a0dbc4a3df1d9ac6ca93767e627e12dc9fdcde6131549b7ef2b132b83fc6a7"} Nov 25 12:47:56 crc kubenswrapper[4675]: I1125 12:47:56.990969 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:57.001488 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerDied","Data":"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f"} Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:56.990368 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="sg-core" containerID="cri-o://c15646bfc3b38fca287b357ef4573b2188ad9e41906c9d1f5ef933fcf012875a" gracePeriod=30 Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:56.990350 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="proxy-httpd" containerID="cri-o://00a0dbc4a3df1d9ac6ca93767e627e12dc9fdcde6131549b7ef2b132b83fc6a7" gracePeriod=30 Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:56.989785 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-central-agent" containerID="cri-o://985d00e574541629b637ed52c38427d75645a909c063c4a1d4f83c6d6d75866a" gracePeriod=30 Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:57.000153 4675 generic.go:334] "Generic (PLEG): container finished" podID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerID="9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f" exitCode=143 Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:56.990380 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-notification-agent" containerID="cri-o://b7ed964feea56273e9e9886a26b18b30f2ed63a812a6609c6948c9840f15af8a" gracePeriod=30 Nov 25 12:47:57 crc kubenswrapper[4675]: I1125 12:47:57.050457 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.698437717 podStartE2EDuration="6.050432828s" podCreationTimestamp="2025-11-25 12:47:51 +0000 UTC" firstStartedPulling="2025-11-25 12:47:53.186933536 +0000 UTC m=+1218.358525877" 
lastFinishedPulling="2025-11-25 12:47:56.538928647 +0000 UTC m=+1221.710520988" observedRunningTime="2025-11-25 12:47:57.034980256 +0000 UTC m=+1222.206572627" watchObservedRunningTime="2025-11-25 12:47:57.050432828 +0000 UTC m=+1222.222025169" Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.012023 4675 generic.go:334] "Generic (PLEG): container finished" podID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerID="f68f77ca3171efb6bccb9c6d2429be22544ed61bfef4ac947736206b86a5bc2e" exitCode=0 Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.012111 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerDied","Data":"f68f77ca3171efb6bccb9c6d2429be22544ed61bfef4ac947736206b86a5bc2e"} Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.015639 4675 generic.go:334] "Generic (PLEG): container finished" podID="001a017a-9695-401e-8f31-637949f418cc" containerID="c15646bfc3b38fca287b357ef4573b2188ad9e41906c9d1f5ef933fcf012875a" exitCode=2 Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.015694 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerDied","Data":"c15646bfc3b38fca287b357ef4573b2188ad9e41906c9d1f5ef933fcf012875a"} Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.484066 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="90a143ed-4c09-4ac7-8dd9-869c15e9ef3c" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.173:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 12:47:58 crc kubenswrapper[4675]: I1125 12:47:58.979498 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 12:47:59 crc kubenswrapper[4675]: I1125 12:47:59.826079 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:47:59 crc kubenswrapper[4675]: I1125 12:47:59.827465 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-log" containerID="cri-o://3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a" gracePeriod=30 Nov 25 12:47:59 crc kubenswrapper[4675]: I1125 12:47:59.827534 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-httpd" containerID="cri-o://f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d" gracePeriod=30 Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.033607 4675 generic.go:334] "Generic (PLEG): container finished" podID="e6f055cd-061d-43e5-8645-e351a7558608" containerID="3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a" exitCode=143 Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.033685 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerDied","Data":"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a"} Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.035863 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" 
event={"ID":"4305dd31-2399-4e02-8b99-224a616e8c8c","Type":"ContainerStarted","Data":"927dca14ae8715f9e3293f7118020f6768ca10f4f460bf25e3eb23a8f6eb90df"} Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.053961 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.946972276 podStartE2EDuration="39.053937811s" podCreationTimestamp="2025-11-25 12:47:21 +0000 UTC" firstStartedPulling="2025-11-25 12:47:22.973690058 +0000 UTC m=+1188.145282399" lastFinishedPulling="2025-11-25 12:47:59.080655593 +0000 UTC m=+1224.252247934" observedRunningTime="2025-11-25 12:48:00.048620608 +0000 UTC m=+1225.220212949" watchObservedRunningTime="2025-11-25 12:48:00.053937811 +0000 UTC m=+1225.225530152" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.665603 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782373 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782496 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782518 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782557 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqvj7\" (UniqueName: \"kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782581 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782672 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782712 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.782735 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs\") pod \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\" (UID: \"09f8f8d4-54c1-44e2-8bbf-1d561cf78572\") " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.783936 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.784182 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs" (OuterVolumeSpecName: "logs") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.796063 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.814845 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7" (OuterVolumeSpecName: "kube-api-access-nqvj7") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "kube-api-access-nqvj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.832687 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts" (OuterVolumeSpecName: "scripts") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.837837 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886196 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886507 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886632 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886741 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886827 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqvj7\" (UniqueName: \"kubernetes.io/projected/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-kube-api-access-nqvj7\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.886941 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.915741 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data" (OuterVolumeSpecName: "config-data") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.927896 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.948583 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "09f8f8d4-54c1-44e2-8bbf-1d561cf78572" (UID: "09f8f8d4-54c1-44e2-8bbf-1d561cf78572"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.988271 4675 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.988485 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:00 crc kubenswrapper[4675]: I1125 12:48:00.988563 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8f8d4-54c1-44e2-8bbf-1d561cf78572-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.056478 4675 generic.go:334] "Generic (PLEG): container finished" podID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerID="3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe" exitCode=0 Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.056557 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerDied","Data":"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe"} Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.056590 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09f8f8d4-54c1-44e2-8bbf-1d561cf78572","Type":"ContainerDied","Data":"3db6d63e7ae7145da0caa493a0d83e6641ba8cb92ae898c9ab06ec99ab8d7276"} Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.056611 4675 scope.go:117] "RemoveContainer" containerID="3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.056769 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.076836 4675 generic.go:334] "Generic (PLEG): container finished" podID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerID="4186bdd8b758dbd20870a379874e6216cdc010bf1887419d78508f955760ae64" exitCode=0 Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.076885 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerDied","Data":"4186bdd8b758dbd20870a379874e6216cdc010bf1887419d78508f955760ae64"} Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.109178 4675 scope.go:117] "RemoveContainer" containerID="9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.136003 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.178874 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.216995 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:48:01 crc kubenswrapper[4675]: E1125 12:48:01.217514 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-log" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.217582 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-log" Nov 25 12:48:01 crc kubenswrapper[4675]: E1125 12:48:01.217661 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-httpd" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.217712 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-httpd" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.218002 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-log" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.218095 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" containerName="glance-httpd" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.219098 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.223332 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.223841 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.235467 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.253629 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.262043 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.263427 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f"} pod="openstack/horizon-6df5497f4d-4g9tv" containerMessage="Container horizon failed startup probe, will be restarted" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.265548 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" containerID="cri-o://5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f" gracePeriod=30 Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.266440 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.280867 4675 scope.go:117] "RemoveContainer" containerID="3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe" Nov 25 12:48:01 crc kubenswrapper[4675]: E1125 12:48:01.286261 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe\": container with ID starting with 3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe not found: ID does not exist" containerID="3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.286323 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe"} err="failed to get container status \"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe\": rpc error: code = NotFound desc = could not find container \"3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe\": container with ID starting with 3f35e9063366f63faf779504e862ddf57f9a9faf6b0d655d4adc9a5db2f239fe not found: ID does not exist" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.286367 4675 scope.go:117] "RemoveContainer" containerID="9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f" Nov 25 12:48:01 crc kubenswrapper[4675]: E1125 12:48:01.287354 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f\": container with ID starting with 9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f not found: ID does not exist" containerID="9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.287378 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f"} err="failed to get container status \"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f\": rpc error: code = NotFound desc = could not find container \"9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f\": container with ID starting with 9d10935c23c3199eabbba9e44d8042d2d0ec2918ce4a442b28cf19d10bd4be4f not found: ID does not exist" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298067 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-logs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298116 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggt5k\" (UniqueName: \"kubernetes.io/projected/d478f76e-2629-426c-8a29-60b4cce437f2-kube-api-access-ggt5k\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298168 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298188 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298218 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298234 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298252 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.298309 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.375468 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.375536 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.376291 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"a8a62130160dd33e971232bb74e27eaa3176dab69a60bc1e0084e518a0762ce4"} pod="openstack/horizon-85d4f84f96-fcncp" containerMessage="Container horizon failed startup probe, will be restarted" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.376318 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" containerID="cri-o://a8a62130160dd33e971232bb74e27eaa3176dab69a60bc1e0084e518a0762ce4" gracePeriod=30 Nov 25 12:48:01 crc kubenswrapper[4675]: 
I1125 12:48:01.399170 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config\") pod \"be6b89d8-f743-4380-9b9b-29b36a55de4b\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.399250 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle\") pod \"be6b89d8-f743-4380-9b9b-29b36a55de4b\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.399455 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config\") pod \"be6b89d8-f743-4380-9b9b-29b36a55de4b\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.399484 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz8sq\" (UniqueName: \"kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq\") pod \"be6b89d8-f743-4380-9b9b-29b36a55de4b\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400071 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs\") pod \"be6b89d8-f743-4380-9b9b-29b36a55de4b\" (UID: \"be6b89d8-f743-4380-9b9b-29b36a55de4b\") " Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400588 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400672 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400765 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400858 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.400942 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-scripts\") pod \"glance-default-internal-api-0\" 
(UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.401115 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.401243 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-logs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.401327 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggt5k\" (UniqueName: \"kubernetes.io/projected/d478f76e-2629-426c-8a29-60b4cce437f2-kube-api-access-ggt5k\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.402478 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.409410 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.412589 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d478f76e-2629-426c-8a29-60b4cce437f2-logs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.412751 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "be6b89d8-f743-4380-9b9b-29b36a55de4b" (UID: "be6b89d8-f743-4380-9b9b-29b36a55de4b"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.414649 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.415545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.422286 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq" (OuterVolumeSpecName: "kube-api-access-lz8sq") pod "be6b89d8-f743-4380-9b9b-29b36a55de4b" (UID: "be6b89d8-f743-4380-9b9b-29b36a55de4b"). InnerVolumeSpecName "kube-api-access-lz8sq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.433498 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.440653 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d478f76e-2629-426c-8a29-60b4cce437f2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.484537 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.495291 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggt5k\" (UniqueName: \"kubernetes.io/projected/d478f76e-2629-426c-8a29-60b4cce437f2-kube-api-access-ggt5k\") pod \"glance-default-internal-api-0\" (UID: \"d478f76e-2629-426c-8a29-60b4cce437f2\") " pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.501892 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config" (OuterVolumeSpecName: "config") pod "be6b89d8-f743-4380-9b9b-29b36a55de4b" (UID: "be6b89d8-f743-4380-9b9b-29b36a55de4b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.502977 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.502993 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz8sq\" (UniqueName: \"kubernetes.io/projected/be6b89d8-f743-4380-9b9b-29b36a55de4b-kube-api-access-lz8sq\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.503003 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.516769 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be6b89d8-f743-4380-9b9b-29b36a55de4b" (UID: "be6b89d8-f743-4380-9b9b-29b36a55de4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.545888 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09f8f8d4-54c1-44e2-8bbf-1d561cf78572" path="/var/lib/kubelet/pods/09f8f8d4-54c1-44e2-8bbf-1d561cf78572/volumes" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.573539 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.583425 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "be6b89d8-f743-4380-9b9b-29b36a55de4b" (UID: "be6b89d8-f743-4380-9b9b-29b36a55de4b"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.604582 4675 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.604614 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be6b89d8-f743-4380-9b9b-29b36a55de4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:01 crc kubenswrapper[4675]: W1125 12:48:01.949108 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd478f76e_2629_426c_8a29_60b4cce437f2.slice/crio-ea5e7d27efb7b7ce31cea949b1ba510e460c34bd9bcb1ce250ca4e7525a8e186 WatchSource:0}: Error finding container ea5e7d27efb7b7ce31cea949b1ba510e460c34bd9bcb1ce250ca4e7525a8e186: Status 404 returned error can't find the container with id ea5e7d27efb7b7ce31cea949b1ba510e460c34bd9bcb1ce250ca4e7525a8e186 Nov 25 12:48:01 crc kubenswrapper[4675]: I1125 12:48:01.957373 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.096252 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b6974fd78-dvq24" event={"ID":"be6b89d8-f743-4380-9b9b-29b36a55de4b","Type":"ContainerDied","Data":"ca9d7f40f5ba143b2a937994a579a67f70b2aa334a0a719c217e36d1ad44159f"} Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.096327 4675 scope.go:117] "RemoveContainer" containerID="f68f77ca3171efb6bccb9c6d2429be22544ed61bfef4ac947736206b86a5bc2e" Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.096512 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b6974fd78-dvq24" Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.122127 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d478f76e-2629-426c-8a29-60b4cce437f2","Type":"ContainerStarted","Data":"ea5e7d27efb7b7ce31cea949b1ba510e460c34bd9bcb1ce250ca4e7525a8e186"} Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.181903 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.188174 4675 scope.go:117] "RemoveContainer" containerID="4186bdd8b758dbd20870a379874e6216cdc010bf1887419d78508f955760ae64" Nov 25 12:48:02 crc kubenswrapper[4675]: I1125 12:48:02.195104 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b6974fd78-dvq24"] Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.134109 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d478f76e-2629-426c-8a29-60b4cce437f2","Type":"ContainerStarted","Data":"3ae74a8ad449f0e8d4c71a1059c6f872d56f3a868724ee54b8f2947468d742b4"} Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.296527 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.154:9292/healthcheck\": read tcp 10.217.0.2:37834->10.217.0.154:9292: read: connection reset by peer" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.296544 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.154:9292/healthcheck\": read tcp 10.217.0.2:37822->10.217.0.154:9292: read: connection reset by peer" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.490326 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="90a143ed-4c09-4ac7-8dd9-869c15e9ef3c" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.173:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.547591 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" path="/var/lib/kubelet/pods/be6b89d8-f743-4380-9b9b-29b36a55de4b/volumes" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.821087 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.848556 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.849449 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.850350 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.850475 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9gj4\" (UniqueName: \"kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.850585 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.850671 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.851027 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.851140 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run\") pod \"e6f055cd-061d-43e5-8645-e351a7558608\" (UID: \"e6f055cd-061d-43e5-8645-e351a7558608\") " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.849309 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs" (OuterVolumeSpecName: "logs") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.851619 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.854356 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.860529 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts" (OuterVolumeSpecName: "scripts") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.869238 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4" (OuterVolumeSpecName: "kube-api-access-w9gj4") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "kube-api-access-w9gj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.870512 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.938935 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.953249 4675 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6f055cd-061d-43e5-8645-e351a7558608-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.966098 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.966328 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.966500 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9gj4\" (UniqueName: \"kubernetes.io/projected/e6f055cd-061d-43e5-8645-e351a7558608-kube-api-access-w9gj4\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:03 crc kubenswrapper[4675]: I1125 12:48:03.966570 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.013947 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.041176 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.064201 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data" (OuterVolumeSpecName: "config-data") pod "e6f055cd-061d-43e5-8645-e351a7558608" (UID: "e6f055cd-061d-43e5-8645-e351a7558608"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.075190 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.075226 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.075238 4675 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6f055cd-061d-43e5-8645-e351a7558608-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.145143 4675 generic.go:334] "Generic (PLEG): container finished" podID="e6f055cd-061d-43e5-8645-e351a7558608" containerID="f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d" exitCode=0 Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.145223 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.145236 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerDied","Data":"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d"} Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.146341 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e6f055cd-061d-43e5-8645-e351a7558608","Type":"ContainerDied","Data":"ed6048ecb1edf39a412bc459cfe0777b9de4e7d9823f4ed58a287a6a1910bfbb"} Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.146388 4675 scope.go:117] "RemoveContainer" containerID="f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.148697 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d478f76e-2629-426c-8a29-60b4cce437f2","Type":"ContainerStarted","Data":"4fabcbcdfa3a2453e9dfa19b89f0eecbfb12bdbeec67121c60f02c9aea0b49aa"} Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.177537 4675 scope.go:117] "RemoveContainer" containerID="3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.179898 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.179877256 podStartE2EDuration="3.179877256s" podCreationTimestamp="2025-11-25 12:48:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:48:04.175921969 +0000 UTC m=+1229.347514320" watchObservedRunningTime="2025-11-25 12:48:04.179877256 +0000 UTC m=+1229.351469597" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.191781 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.200800 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.232586 4675 scope.go:117] 
"RemoveContainer" containerID="f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d" Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.235406 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d\": container with ID starting with f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d not found: ID does not exist" containerID="f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.235460 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d"} err="failed to get container status \"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d\": rpc error: code = NotFound desc = could not find container \"f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d\": container with ID starting with f68a807006687570049382fd204f96b43fc4000b7bcc7fc5fa764cefb8e3682d not found: ID does not exist" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.235493 4675 scope.go:117] "RemoveContainer" containerID="3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a" Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.236513 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a\": container with ID starting with 3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a not found: ID does not exist" containerID="3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.236538 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a"} err="failed to get container status \"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a\": rpc error: code = NotFound desc = could not find container \"3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a\": container with ID starting with 3dce4621680162c8fa5ce1dc52e6f7584b021727804edff470b3751a05fa002a not found: ID does not exist" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240059 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.240455 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-api" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240472 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-api" Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.240646 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-httpd" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240653 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-httpd" Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.240694 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-httpd" Nov 25 12:48:04 
crc kubenswrapper[4675]: I1125 12:48:04.240702 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-httpd" Nov 25 12:48:04 crc kubenswrapper[4675]: E1125 12:48:04.240713 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-log" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240719 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-log" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240892 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-httpd" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240906 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-httpd" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240921 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="be6b89d8-f743-4380-9b9b-29b36a55de4b" containerName="neutron-api" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.240933 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6f055cd-061d-43e5-8645-e351a7558608" containerName="glance-log" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.242027 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.246240 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.246420 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.262708 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394374 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394719 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-logs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394756 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394931 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8jtl\" (UniqueName: \"kubernetes.io/projected/5d10980d-6f3c-4a3f-a4ce-30e07d985393-kube-api-access-l8jtl\") pod 
\"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394959 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.394984 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.395047 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.395121 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496017 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496124 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8jtl\" (UniqueName: \"kubernetes.io/projected/5d10980d-6f3c-4a3f-a4ce-30e07d985393-kube-api-access-l8jtl\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496147 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496164 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496199 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-httpd-run\") pod 
\"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496240 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496272 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-logs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496287 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496714 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.496947 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.497239 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d10980d-6f3c-4a3f-a4ce-30e07d985393-logs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.507425 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.507562 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.513441 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 
12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.515636 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d10980d-6f3c-4a3f-a4ce-30e07d985393-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.533502 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8jtl\" (UniqueName: \"kubernetes.io/projected/5d10980d-6f3c-4a3f-a4ce-30e07d985393-kube-api-access-l8jtl\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.562136 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5d10980d-6f3c-4a3f-a4ce-30e07d985393\") " pod="openstack/glance-default-external-api-0" Nov 25 12:48:04 crc kubenswrapper[4675]: I1125 12:48:04.859337 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 12:48:05 crc kubenswrapper[4675]: I1125 12:48:05.525559 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 12:48:05 crc kubenswrapper[4675]: I1125 12:48:05.552615 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6f055cd-061d-43e5-8645-e351a7558608" path="/var/lib/kubelet/pods/e6f055cd-061d-43e5-8645-e351a7558608/volumes" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.180261 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d10980d-6f3c-4a3f-a4ce-30e07d985393","Type":"ContainerStarted","Data":"244d1790c34030a404fcebc73975584222a19114787d8855f313474d6bd5a563"} Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.423403 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-5646n"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.424498 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.432280 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxth7\" (UniqueName: \"kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7\") pod \"nova-api-db-create-5646n\" (UID: \"d11cfa02-10db-4ea2-a713-bd7b18d1c65d\") " pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.441234 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5646n"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.543201 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxth7\" (UniqueName: \"kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7\") pod \"nova-api-db-create-5646n\" (UID: \"d11cfa02-10db-4ea2-a713-bd7b18d1c65d\") " pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.574695 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-8v95j"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.627127 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.630619 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8v95j"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.643310 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxth7\" (UniqueName: \"kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7\") pod \"nova-api-db-create-5646n\" (UID: \"d11cfa02-10db-4ea2-a713-bd7b18d1c65d\") " pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.652968 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdw8l\" (UniqueName: \"kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l\") pod \"nova-cell0-db-create-8v95j\" (UID: \"06abd7e9-ef14-4166-9897-27f51471ee36\") " pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.746085 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-q7g4j"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.747259 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.749574 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-q7g4j"] Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.787444 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.788174 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdw8l\" (UniqueName: \"kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l\") pod \"nova-cell0-db-create-8v95j\" (UID: \"06abd7e9-ef14-4166-9897-27f51471ee36\") " pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.788236 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47kd8\" (UniqueName: \"kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8\") pod \"nova-cell1-db-create-q7g4j\" (UID: \"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce\") " pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.892642 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47kd8\" (UniqueName: \"kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8\") pod \"nova-cell1-db-create-q7g4j\" (UID: \"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce\") " pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.893615 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdw8l\" (UniqueName: \"kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l\") pod \"nova-cell0-db-create-8v95j\" (UID: \"06abd7e9-ef14-4166-9897-27f51471ee36\") " pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.945376 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47kd8\" (UniqueName: \"kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8\") pod \"nova-cell1-db-create-q7g4j\" (UID: \"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce\") " pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:06 crc kubenswrapper[4675]: I1125 12:48:06.982534 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:07 crc kubenswrapper[4675]: I1125 12:48:07.149699 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:07 crc kubenswrapper[4675]: I1125 12:48:07.217760 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d10980d-6f3c-4a3f-a4ce-30e07d985393","Type":"ContainerStarted","Data":"1473b9f40c860716389401ef427cf7c7d4103e03db706e63dc2091a010ad698d"} Nov 25 12:48:07 crc kubenswrapper[4675]: I1125 12:48:07.559721 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5646n"] Nov 25 12:48:07 crc kubenswrapper[4675]: I1125 12:48:07.716270 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8v95j"] Nov 25 12:48:07 crc kubenswrapper[4675]: I1125 12:48:07.940589 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-q7g4j"] Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.228659 4675 generic.go:334] "Generic (PLEG): container finished" podID="d11cfa02-10db-4ea2-a713-bd7b18d1c65d" containerID="f5b712a84cc78eea396104e526aca481eabc08f8d1dcc5bd70d1d4daad441783" exitCode=0 Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.228729 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5646n" event={"ID":"d11cfa02-10db-4ea2-a713-bd7b18d1c65d","Type":"ContainerDied","Data":"f5b712a84cc78eea396104e526aca481eabc08f8d1dcc5bd70d1d4daad441783"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.228754 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5646n" event={"ID":"d11cfa02-10db-4ea2-a713-bd7b18d1c65d","Type":"ContainerStarted","Data":"81a64aa7243b366021098b58441d203614ca779257bb344fe98a7c0cf600f828"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.230891 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q7g4j" event={"ID":"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce","Type":"ContainerStarted","Data":"87b8d2e878db95a227a55dacefb33f9defc9bf9a7547455a7b2b9e6a5b1cf07e"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.230932 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q7g4j" event={"ID":"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce","Type":"ContainerStarted","Data":"7d5052c033b377c57812c234ba451b3938bab7c9b3efcef03f758dfa9c7e3118"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.232394 4675 generic.go:334] "Generic (PLEG): container finished" podID="06abd7e9-ef14-4166-9897-27f51471ee36" containerID="0af2c2716a15891566813fd756a3847715e200356572cc057335c5ab988fbf4f" exitCode=0 Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.232432 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8v95j" event={"ID":"06abd7e9-ef14-4166-9897-27f51471ee36","Type":"ContainerDied","Data":"0af2c2716a15891566813fd756a3847715e200356572cc057335c5ab988fbf4f"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.232459 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8v95j" event={"ID":"06abd7e9-ef14-4166-9897-27f51471ee36","Type":"ContainerStarted","Data":"f38d18a42e9288ccc280b3d22ce2c02c69956ce155d46907b3a66732a4a35f88"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.234357 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"5d10980d-6f3c-4a3f-a4ce-30e07d985393","Type":"ContainerStarted","Data":"484ea0367650606556945ac1735869d2b35c417f217eb60b216d736c5b9dae5e"} Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.284885 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.284866833 podStartE2EDuration="4.284866833s" podCreationTimestamp="2025-11-25 12:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:48:08.282157797 +0000 UTC m=+1233.453750138" watchObservedRunningTime="2025-11-25 12:48:08.284866833 +0000 UTC m=+1233.456459164" Nov 25 12:48:08 crc kubenswrapper[4675]: I1125 12:48:08.296468 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-q7g4j" podStartSLOduration=2.296450858 podStartE2EDuration="2.296450858s" podCreationTimestamp="2025-11-25 12:48:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:48:08.294843567 +0000 UTC m=+1233.466435908" watchObservedRunningTime="2025-11-25 12:48:08.296450858 +0000 UTC m=+1233.468043199" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.252381 4675 generic.go:334] "Generic (PLEG): container finished" podID="4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" containerID="87b8d2e878db95a227a55dacefb33f9defc9bf9a7547455a7b2b9e6a5b1cf07e" exitCode=0 Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.252915 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q7g4j" event={"ID":"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce","Type":"ContainerDied","Data":"87b8d2e878db95a227a55dacefb33f9defc9bf9a7547455a7b2b9e6a5b1cf07e"} Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.780061 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.790484 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.858679 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdw8l\" (UniqueName: \"kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l\") pod \"06abd7e9-ef14-4166-9897-27f51471ee36\" (UID: \"06abd7e9-ef14-4166-9897-27f51471ee36\") " Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.858842 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxth7\" (UniqueName: \"kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7\") pod \"d11cfa02-10db-4ea2-a713-bd7b18d1c65d\" (UID: \"d11cfa02-10db-4ea2-a713-bd7b18d1c65d\") " Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.882922 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7" (OuterVolumeSpecName: "kube-api-access-bxth7") pod "d11cfa02-10db-4ea2-a713-bd7b18d1c65d" (UID: "d11cfa02-10db-4ea2-a713-bd7b18d1c65d"). InnerVolumeSpecName "kube-api-access-bxth7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.883838 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l" (OuterVolumeSpecName: "kube-api-access-bdw8l") pod "06abd7e9-ef14-4166-9897-27f51471ee36" (UID: "06abd7e9-ef14-4166-9897-27f51471ee36"). InnerVolumeSpecName "kube-api-access-bdw8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.963059 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdw8l\" (UniqueName: \"kubernetes.io/projected/06abd7e9-ef14-4166-9897-27f51471ee36-kube-api-access-bdw8l\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:09 crc kubenswrapper[4675]: I1125 12:48:09.963093 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxth7\" (UniqueName: \"kubernetes.io/projected/d11cfa02-10db-4ea2-a713-bd7b18d1c65d-kube-api-access-bxth7\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.266239 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8v95j" event={"ID":"06abd7e9-ef14-4166-9897-27f51471ee36","Type":"ContainerDied","Data":"f38d18a42e9288ccc280b3d22ce2c02c69956ce155d46907b3a66732a4a35f88"} Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.266319 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f38d18a42e9288ccc280b3d22ce2c02c69956ce155d46907b3a66732a4a35f88" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.266332 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8v95j" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.273427 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5646n" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.273524 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5646n" event={"ID":"d11cfa02-10db-4ea2-a713-bd7b18d1c65d","Type":"ContainerDied","Data":"81a64aa7243b366021098b58441d203614ca779257bb344fe98a7c0cf600f828"} Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.273576 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81a64aa7243b366021098b58441d203614ca779257bb344fe98a7c0cf600f828" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.726380 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.780366 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47kd8\" (UniqueName: \"kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8\") pod \"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce\" (UID: \"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce\") " Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.785737 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8" (OuterVolumeSpecName: "kube-api-access-47kd8") pod "4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" (UID: "4f517bd1-b71d-4bd5-8570-dda1efd3e4ce"). InnerVolumeSpecName "kube-api-access-47kd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:48:10 crc kubenswrapper[4675]: I1125 12:48:10.883019 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47kd8\" (UniqueName: \"kubernetes.io/projected/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce-kube-api-access-47kd8\") on node \"crc\" DevicePath \"\"" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.285612 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q7g4j" event={"ID":"4f517bd1-b71d-4bd5-8570-dda1efd3e4ce","Type":"ContainerDied","Data":"7d5052c033b377c57812c234ba451b3938bab7c9b3efcef03f758dfa9c7e3118"} Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.285880 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d5052c033b377c57812c234ba451b3938bab7c9b3efcef03f758dfa9c7e3118" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.285930 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q7g4j" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.575368 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.575411 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.606095 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:11 crc kubenswrapper[4675]: I1125 12:48:11.629987 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:12 crc kubenswrapper[4675]: I1125 12:48:12.293959 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:12 crc kubenswrapper[4675]: I1125 12:48:12.294014 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.523151 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.524503 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.619709 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.859588 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.859633 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.894734 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 12:48:14 crc kubenswrapper[4675]: I1125 12:48:14.907693 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 12:48:15 crc kubenswrapper[4675]: I1125 12:48:15.318359 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
Nov 25 12:48:15 crc kubenswrapper[4675]: I1125 12:48:15.318723 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.678937 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-e721-account-create-hv52z"]
Nov 25 12:48:16 crc kubenswrapper[4675]: E1125 12:48:16.679567 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06abd7e9-ef14-4166-9897-27f51471ee36" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679582 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="06abd7e9-ef14-4166-9897-27f51471ee36" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: E1125 12:48:16.679602 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11cfa02-10db-4ea2-a713-bd7b18d1c65d" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679608 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11cfa02-10db-4ea2-a713-bd7b18d1c65d" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: E1125 12:48:16.679641 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679648 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679861 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11cfa02-10db-4ea2-a713-bd7b18d1c65d" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679885 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.679897 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="06abd7e9-ef14-4166-9897-27f51471ee36" containerName="mariadb-database-create"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.680475 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.683538 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.707618 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e721-account-create-hv52z"]
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.794203 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns2qk\" (UniqueName: \"kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk\") pod \"nova-api-e721-account-create-hv52z\" (UID: \"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5\") " pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.862964 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-79eb-account-create-8tls9"]
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.864081 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.876938 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-79eb-account-create-8tls9"]
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.880414 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.897635 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns2qk\" (UniqueName: \"kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk\") pod \"nova-api-e721-account-create-hv52z\" (UID: \"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5\") " pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.921860 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns2qk\" (UniqueName: \"kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk\") pod \"nova-api-e721-account-create-hv52z\" (UID: \"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5\") " pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:16 crc kubenswrapper[4675]: I1125 12:48:16.999742 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz9k8\" (UniqueName: \"kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8\") pod \"nova-cell0-79eb-account-create-8tls9\" (UID: \"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e\") " pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.019308 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.087314 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-46b5-account-create-lxxth"]
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.088481 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.095158 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.099464 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-46b5-account-create-lxxth"]
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.105002 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz9k8\" (UniqueName: \"kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8\") pod \"nova-cell0-79eb-account-create-8tls9\" (UID: \"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e\") " pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.152888 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz9k8\" (UniqueName: \"kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8\") pod \"nova-cell0-79eb-account-create-8tls9\" (UID: \"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e\") " pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.180061 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-79eb-account-create-8tls9"
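[Editor's note] The VerifyControllerAttachedVolume / MountVolume.SetUp records above, and the UnmountVolume.TearDown / "Volume detached" records elsewhere in this log, are the kubelet's volume manager reconciling the desired set of volumes (from pods in the API) against what is actually mounted on the node. A toy sketch of that loop, assuming plain string sets rather than the real data structures:

```go
package main

import "fmt"

// reconcile mounts volumes that pods still need and tears down volumes
// whose pods are gone, echoing the MountVolume/UnmountVolume pairs in
// the log. Illustrative only; not the kubelet's actual reconciler.
func reconcile(desired, actual map[string]bool) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Println("MountVolume.SetUp succeeded for volume", vol)
			actual[vol] = true
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Println("UnmountVolume.TearDown succeeded for volume", vol)
			delete(actual, vol)
			fmt.Println("Volume detached for volume", vol)
		}
	}
}

func main() {
	desired := map[string]bool{"kube-api-access-ns2qk": true}
	actual := map[string]bool{"kube-api-access-bdw8l": true}
	reconcile(desired, actual)
}
```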
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.206466 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85cbc\" (UniqueName: \"kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc\") pod \"nova-cell1-46b5-account-create-lxxth\" (UID: \"bf34c61d-c354-49a2-9956-3e3f0c09f6c3\") " pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.308317 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85cbc\" (UniqueName: \"kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc\") pod \"nova-cell1-46b5-account-create-lxxth\" (UID: \"bf34c61d-c354-49a2-9956-3e3f0c09f6c3\") " pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.327796 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85cbc\" (UniqueName: \"kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc\") pod \"nova-cell1-46b5-account-create-lxxth\" (UID: \"bf34c61d-c354-49a2-9956-3e3f0c09f6c3\") " pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.493145 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.696271 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e721-account-create-hv52z"]
Nov 25 12:48:17 crc kubenswrapper[4675]: W1125 12:48:17.712060 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7dc2e46_7016_47c0_8074_9a5aa01dbdb5.slice/crio-3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27 WatchSource:0}: Error finding container 3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27: Status 404 returned error can't find the container with id 3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.801509 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-79eb-account-create-8tls9"]
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.877832 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.877920 4675 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 12:48:17 crc kubenswrapper[4675]: I1125 12:48:17.880767 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.008342 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-46b5-account-create-lxxth"]
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.368569 4675 generic.go:334] "Generic (PLEG): container finished" podID="9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" containerID="beb4334708cf53ccd644d9475bfcefa3412a4b1b6d74b22ef01aaa3af33f9f18" exitCode=0
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.368651 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-79eb-account-create-8tls9" event={"ID":"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e","Type":"ContainerDied","Data":"beb4334708cf53ccd644d9475bfcefa3412a4b1b6d74b22ef01aaa3af33f9f18"}
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.368873 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-79eb-account-create-8tls9" event={"ID":"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e","Type":"ContainerStarted","Data":"cc7b0ac9fcecdf40a2b6736bd3abd8328363eacfbbf739006c40712934b3b3a3"}
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.371049 4675 generic.go:334] "Generic (PLEG): container finished" podID="bf34c61d-c354-49a2-9956-3e3f0c09f6c3" containerID="3d2f5d4f805c6b32548d3269cb8b5d27a9b6ffe117487ebfa3d76979ab7ac3d0" exitCode=0
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.371100 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-46b5-account-create-lxxth" event={"ID":"bf34c61d-c354-49a2-9956-3e3f0c09f6c3","Type":"ContainerDied","Data":"3d2f5d4f805c6b32548d3269cb8b5d27a9b6ffe117487ebfa3d76979ab7ac3d0"}
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.371126 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-46b5-account-create-lxxth" event={"ID":"bf34c61d-c354-49a2-9956-3e3f0c09f6c3","Type":"ContainerStarted","Data":"71fbeb6eeb3d6aea0dae95432d50f9c0afeec8f6140f77aadc5de0f6f87fc39d"}
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.373173 4675 generic.go:334] "Generic (PLEG): container finished" podID="b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" containerID="a390ef7b9cc37457e5c549631b8fb2d772a9e241e9a82d94c1cd4b7526da8b9c" exitCode=0
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.373492 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e721-account-create-hv52z" event={"ID":"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5","Type":"ContainerDied","Data":"a390ef7b9cc37457e5c549631b8fb2d772a9e241e9a82d94c1cd4b7526da8b9c"}
Nov 25 12:48:18 crc kubenswrapper[4675]: I1125 12:48:18.373523 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e721-account-create-hv52z" event={"ID":"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5","Type":"ContainerStarted","Data":"3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27"}
Nov 25 12:48:19 crc kubenswrapper[4675]: I1125 12:48:19.948713 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:19 crc kubenswrapper[4675]: I1125 12:48:19.960149 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:19 crc kubenswrapper[4675]: I1125 12:48:19.966450 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e721-account-create-hv52z"
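[Editor's note] The "Generic (PLEG): container finished" lines followed by paired ContainerDied/ContainerStarted events above come from the pod lifecycle event generator, which periodically relists container states and emits one event per observed change. An illustrative sketch of that diffing, not the actual PLEG implementation:

```go
package main

import "fmt"

// relist compares previously seen container states with current ones
// and emits ContainerStarted/ContainerDied events, in the spirit of
// the PLEG records above (container IDs shortened for readability).
func relist(old, cur map[string]string) []string {
	var events []string
	for id, state := range cur {
		prev, seen := old[id]
		switch {
		case !seen && state == "running":
			events = append(events, "ContainerStarted "+id)
		case seen && prev == "running" && state == "exited":
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	old := map[string]string{"beb43347": "running"}
	cur := map[string]string{"beb43347": "exited", "cc7b0ac9": "running"}
	for _, e := range relist(old, cur) {
		fmt.Println(e)
	}
}
```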
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.059034 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fz9k8\" (UniqueName: \"kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8\") pod \"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e\" (UID: \"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e\") "
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.059183 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns2qk\" (UniqueName: \"kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk\") pod \"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5\" (UID: \"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5\") "
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.059242 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85cbc\" (UniqueName: \"kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc\") pod \"bf34c61d-c354-49a2-9956-3e3f0c09f6c3\" (UID: \"bf34c61d-c354-49a2-9956-3e3f0c09f6c3\") "
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.064380 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc" (OuterVolumeSpecName: "kube-api-access-85cbc") pod "bf34c61d-c354-49a2-9956-3e3f0c09f6c3" (UID: "bf34c61d-c354-49a2-9956-3e3f0c09f6c3"). InnerVolumeSpecName "kube-api-access-85cbc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.064551 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk" (OuterVolumeSpecName: "kube-api-access-ns2qk") pod "b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" (UID: "b7dc2e46-7016-47c0-8074-9a5aa01dbdb5"). InnerVolumeSpecName "kube-api-access-ns2qk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.065506 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8" (OuterVolumeSpecName: "kube-api-access-fz9k8") pod "9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" (UID: "9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e"). InnerVolumeSpecName "kube-api-access-fz9k8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.161241 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fz9k8\" (UniqueName: \"kubernetes.io/projected/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e-kube-api-access-fz9k8\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.161271 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns2qk\" (UniqueName: \"kubernetes.io/projected/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5-kube-api-access-ns2qk\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.161281 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85cbc\" (UniqueName: \"kubernetes.io/projected/bf34c61d-c354-49a2-9956-3e3f0c09f6c3-kube-api-access-85cbc\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.401140 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-46b5-account-create-lxxth" event={"ID":"bf34c61d-c354-49a2-9956-3e3f0c09f6c3","Type":"ContainerDied","Data":"71fbeb6eeb3d6aea0dae95432d50f9c0afeec8f6140f77aadc5de0f6f87fc39d"}
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.401189 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71fbeb6eeb3d6aea0dae95432d50f9c0afeec8f6140f77aadc5de0f6f87fc39d"
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.401238 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-46b5-account-create-lxxth"
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.415407 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e721-account-create-hv52z" event={"ID":"b7dc2e46-7016-47c0-8074-9a5aa01dbdb5","Type":"ContainerDied","Data":"3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27"}
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.415440 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c458318721d43920445083d0da1c7dfb6b9508d975c2a29458a3f54a4da0f27"
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.415508 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e721-account-create-hv52z"
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.419424 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-79eb-account-create-8tls9" event={"ID":"9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e","Type":"ContainerDied","Data":"cc7b0ac9fcecdf40a2b6736bd3abd8328363eacfbbf739006c40712934b3b3a3"}
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.419455 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc7b0ac9fcecdf40a2b6736bd3abd8328363eacfbbf739006c40712934b3b3a3"
Nov 25 12:48:20 crc kubenswrapper[4675]: I1125 12:48:20.419508 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-79eb-account-create-8tls9"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.211908 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr22c"]
Nov 25 12:48:22 crc kubenswrapper[4675]: E1125 12:48:22.214510 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.214717 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: E1125 12:48:22.214804 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf34c61d-c354-49a2-9956-3e3f0c09f6c3" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.214907 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf34c61d-c354-49a2-9956-3e3f0c09f6c3" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: E1125 12:48:22.215046 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.215129 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.215511 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.215619 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.215702 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf34c61d-c354-49a2-9956-3e3f0c09f6c3" containerName="mariadb-account-create"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.217106 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.219947 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-px4x4"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.223469 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.223489 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.248632 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr22c"]
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.297135 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.297494 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.297679 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.297916 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdfv6\" (UniqueName: \"kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.383880 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.400001 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdfv6\" (UniqueName: \"kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.400101 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
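[Editor's note] The ceilometer-0 readiness failure above ("HTTP probe failed with statuscode: 503") reflects the standard HTTP probe rule: any status in [200, 400) passes, anything else fails, and transport errors (like the horizon "connection refused" failures later in this log) fail as well. A stdlib Go sketch under those assumptions; the URL is hypothetical:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// httpProbe performs one HTTP GET and applies the usual probe rule:
// 200 <= status < 400 is success, everything else (e.g. 503) fails.
func httpProbe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	// Hypothetical endpoint; a 503 from the backend would be reported
	// exactly like the ceilometer-0 record above.
	if err := httpProbe("http://127.0.0.1:3000/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
```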
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.400129 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.400192 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.406512 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.407540 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.409405 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.419594 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdfv6\" (UniqueName: \"kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6\") pod \"nova-cell0-conductor-db-sync-fr22c\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") " pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:22 crc kubenswrapper[4675]: I1125 12:48:22.556830 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:48:23 crc kubenswrapper[4675]: I1125 12:48:23.038512 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr22c"]
Nov 25 12:48:23 crc kubenswrapper[4675]: W1125 12:48:23.039110 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0babe492_76ca_4609_b35f_e1d613d06078.slice/crio-3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2 WatchSource:0}: Error finding container 3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2: Status 404 returned error can't find the container with id 3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2
Nov 25 12:48:23 crc kubenswrapper[4675]: I1125 12:48:23.041710 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 12:48:23 crc kubenswrapper[4675]: I1125 12:48:23.461893 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr22c" event={"ID":"0babe492-76ca-4609-b35f-e1d613d06078","Type":"ContainerStarted","Data":"3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2"}
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.511729 4675 generic.go:334] "Generic (PLEG): container finished" podID="001a017a-9695-401e-8f31-637949f418cc" containerID="00a0dbc4a3df1d9ac6ca93767e627e12dc9fdcde6131549b7ef2b132b83fc6a7" exitCode=137
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.512279 4675 generic.go:334] "Generic (PLEG): container finished" podID="001a017a-9695-401e-8f31-637949f418cc" containerID="b7ed964feea56273e9e9886a26b18b30f2ed63a812a6609c6948c9840f15af8a" exitCode=137
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.512295 4675 generic.go:334] "Generic (PLEG): container finished" podID="001a017a-9695-401e-8f31-637949f418cc" containerID="985d00e574541629b637ed52c38427d75645a909c063c4a1d4f83c6d6d75866a" exitCode=137
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.511802 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerDied","Data":"00a0dbc4a3df1d9ac6ca93767e627e12dc9fdcde6131549b7ef2b132b83fc6a7"}
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.512345 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerDied","Data":"b7ed964feea56273e9e9886a26b18b30f2ed63a812a6609c6948c9840f15af8a"}
Nov 25 12:48:27 crc kubenswrapper[4675]: I1125 12:48:27.512359 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerDied","Data":"985d00e574541629b637ed52c38427d75645a909c063c4a1d4f83c6d6d75866a"}
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.561647 4675 generic.go:334] "Generic (PLEG): container finished" podID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerID="5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f" exitCode=137
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.561706 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerDied","Data":"5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f"}
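[Editor's note] The exitCode=137 results above indicate containers killed by SIGKILL (137 = 128 + signal 9), the usual outcome when a container does not stop within its termination grace period during pod deletion or restart. A small Go decoder for that shell convention:

```go
package main

import "fmt"

// signalFromExitCode decodes the 128+N convention for a process
// terminated by signal N; 137 therefore means SIGKILL (9).
func signalFromExitCode(code int) (int, bool) {
	if code > 128 && code < 160 {
		return code - 128, true
	}
	return 0, false // a plain exit status, like exitCode=0 elsewhere in this log
}

func main() {
	if sig, ok := signalFromExitCode(137); ok {
		fmt.Printf("exitCode=137 => killed by signal %d (SIGKILL)\n", sig)
	}
}
```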
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.562071 4675 scope.go:117] "RemoveContainer" containerID="7bf144cdeff17dc28536fb9d88234abffec553b120667f546a29ca608a4da773"
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.566088 4675 generic.go:334] "Generic (PLEG): container finished" podID="412d2040-4c83-4443-989e-cc844466e840" containerID="a8a62130160dd33e971232bb74e27eaa3176dab69a60bc1e0084e518a0762ce4" exitCode=137
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.566132 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerDied","Data":"a8a62130160dd33e971232bb74e27eaa3176dab69a60bc1e0084e518a0762ce4"}
Nov 25 12:48:31 crc kubenswrapper[4675]: I1125 12:48:31.962642 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014274 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-484qf\" (UniqueName: \"kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014343 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014443 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014502 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014528 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014556 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.014701 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml\") pod \"001a017a-9695-401e-8f31-637949f418cc\" (UID: \"001a017a-9695-401e-8f31-637949f418cc\") "
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.016840 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.016902 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.024112 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts" (OuterVolumeSpecName: "scripts") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.025016 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf" (OuterVolumeSpecName: "kube-api-access-484qf") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "kube-api-access-484qf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.050024 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.107958 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117536 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117572 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-484qf\" (UniqueName: \"kubernetes.io/projected/001a017a-9695-401e-8f31-637949f418cc-kube-api-access-484qf\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117625 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117637 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117649 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.117659 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/001a017a-9695-401e-8f31-637949f418cc-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.124381 4675 scope.go:117] "RemoveContainer" containerID="b8727dfcb754c1134bbfe12d6c7ac53701bf0d8ea86188c3e45e1d8ade2e2c7f"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.147304 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data" (OuterVolumeSpecName: "config-data") pod "001a017a-9695-401e-8f31-637949f418cc" (UID: "001a017a-9695-401e-8f31-637949f418cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.219978 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/001a017a-9695-401e-8f31-637949f418cc-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.574297 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr22c" event={"ID":"0babe492-76ca-4609-b35f-e1d613d06078","Type":"ContainerStarted","Data":"67fd9d608c15ce7038ac66ff945d10e638b4dfb0ba706ed14583307038b4ac9d"}
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.577645 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerStarted","Data":"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09"}
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.582450 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"001a017a-9695-401e-8f31-637949f418cc","Type":"ContainerDied","Data":"82b09628447c7408d20ff0f04e0a88c22bd8c62999c86c4e46d11ca2811bd708"}
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.582491 4675 scope.go:117] "RemoveContainer" containerID="00a0dbc4a3df1d9ac6ca93767e627e12dc9fdcde6131549b7ef2b132b83fc6a7"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.582593 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.599562 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d4f84f96-fcncp" event={"ID":"412d2040-4c83-4443-989e-cc844466e840","Type":"ContainerStarted","Data":"090e1c90333aadeb577764e452517063942bd432d2ce996e6c754b35559327ca"}
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.602227 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-fr22c" podStartSLOduration=1.519198237 podStartE2EDuration="10.602211514s" podCreationTimestamp="2025-11-25 12:48:22 +0000 UTC" firstStartedPulling="2025-11-25 12:48:23.041417308 +0000 UTC m=+1248.213009649" lastFinishedPulling="2025-11-25 12:48:32.124430585 +0000 UTC m=+1257.296022926" observedRunningTime="2025-11-25 12:48:32.596845205 +0000 UTC m=+1257.768437566" watchObservedRunningTime="2025-11-25 12:48:32.602211514 +0000 UTC m=+1257.773803865"
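[Editor's note] The pod_startup_latency_tracker record above reports podStartE2EDuration="10.602211514s" (pod creation at 12:48:22 to watch-observed running at 12:48:32.602211514) and podStartSLOduration=1.519198237, which matches the end-to-end time minus the image-pull window (12:48:23.041417308 to 12:48:32.124430585). A sketch of that arithmetic using the timestamps from the record; the exact SLO definition is inferred from these numbers, not taken from kubelet source:

```go
package main

import (
	"fmt"
	"time"
)

func mustParse(v string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", v)
	if err != nil {
		panic(err)
	}
	return t
}

// Reproduces the startup-latency arithmetic from the record above:
// SLO duration = end-to-end duration - image pull time.
func main() {
	created := mustParse("2025-11-25 12:48:22 +0000 UTC")
	firstPull := mustParse("2025-11-25 12:48:23.041417308 +0000 UTC")
	lastPull := mustParse("2025-11-25 12:48:32.124430585 +0000 UTC")
	watchObservedRunning := mustParse("2025-11-25 12:48:32.602211514 +0000 UTC")

	e2e := watchObservedRunning.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println("podStartE2EDuration:", e2e) // 10.602211514s
	fmt.Println("podStartSLOduration:", slo) // 1.519198237s
}
```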
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.613122 4675 scope.go:117] "RemoveContainer" containerID="c15646bfc3b38fca287b357ef4573b2188ad9e41906c9d1f5ef933fcf012875a"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.642756 4675 scope.go:117] "RemoveContainer" containerID="b7ed964feea56273e9e9886a26b18b30f2ed63a812a6609c6948c9840f15af8a"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.673314 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.673555 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.674164 4675 scope.go:117] "RemoveContainer" containerID="985d00e574541629b637ed52c38427d75645a909c063c4a1d4f83c6d6d75866a"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.692454 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:48:32 crc kubenswrapper[4675]: E1125 12:48:32.692917 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-central-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.692940 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-central-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: E1125 12:48:32.692976 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="proxy-httpd"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.692984 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="proxy-httpd"
Nov 25 12:48:32 crc kubenswrapper[4675]: E1125 12:48:32.692997 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="sg-core"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693009 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="sg-core"
Nov 25 12:48:32 crc kubenswrapper[4675]: E1125 12:48:32.693039 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-notification-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693047 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-notification-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693285 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-notification-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693304 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="proxy-httpd"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693326 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="sg-core"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.693340 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="001a017a-9695-401e-8f31-637949f418cc" containerName="ceilometer-central-agent"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.695353 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.701690 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.701951 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.715314 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835652 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb9q4\" (UniqueName: \"kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835721 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835783 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835810 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835846 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835916 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.835943 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.937911 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb9q4\" (UniqueName: \"kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938023 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938074 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938104 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938131 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938183 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938210 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.938869 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.939019 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.945460 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.945893 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.946986 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.950523 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:32 crc kubenswrapper[4675]: I1125 12:48:32.958455 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb9q4\" (UniqueName: \"kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4\") pod \"ceilometer-0\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") " pod="openstack/ceilometer-0"
Nov 25 12:48:33 crc kubenswrapper[4675]: I1125 12:48:33.021939 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:48:33 crc kubenswrapper[4675]: I1125 12:48:33.505639 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:48:33 crc kubenswrapper[4675]: W1125 12:48:33.508110 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fb59010_be01_41ed_8fb5_c2754f035e18.slice/crio-ca1d73df2e6f160a63cc4ff4fe5e79f9e0bc2ba8b83cb703b0e414a179018ceb WatchSource:0}: Error finding container ca1d73df2e6f160a63cc4ff4fe5e79f9e0bc2ba8b83cb703b0e414a179018ceb: Status 404 returned error can't find the container with id ca1d73df2e6f160a63cc4ff4fe5e79f9e0bc2ba8b83cb703b0e414a179018ceb
Nov 25 12:48:33 crc kubenswrapper[4675]: I1125 12:48:33.545972 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="001a017a-9695-401e-8f31-637949f418cc" path="/var/lib/kubelet/pods/001a017a-9695-401e-8f31-637949f418cc/volumes"
Nov 25 12:48:33 crc kubenswrapper[4675]: I1125 12:48:33.613347 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerStarted","Data":"ca1d73df2e6f160a63cc4ff4fe5e79f9e0bc2ba8b83cb703b0e414a179018ceb"}
Nov 25 12:48:34 crc kubenswrapper[4675]: I1125 12:48:34.625688 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerStarted","Data":"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"}
Nov 25 12:48:35 crc kubenswrapper[4675]: I1125 12:48:35.637918 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerStarted","Data":"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"}
Nov 25 12:48:37 crc kubenswrapper[4675]: I1125 12:48:37.664335 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerStarted","Data":"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"}
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.253410 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6df5497f4d-4g9tv"
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.254006 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6df5497f4d-4g9tv"
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.375369 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85d4f84f96-fcncp"
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.375747 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85d4f84f96-fcncp"
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.702109 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerStarted","Data":"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"}
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.702669 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 12:48:41 crc kubenswrapper[4675]: I1125 12:48:41.734555 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.780330365 podStartE2EDuration="9.734539236s" podCreationTimestamp="2025-11-25 12:48:32 +0000 UTC" firstStartedPulling="2025-11-25 12:48:33.510933556 +0000 UTC m=+1258.682525897" lastFinishedPulling="2025-11-25 12:48:40.465142427 +0000 UTC m=+1265.636734768" observedRunningTime="2025-11-25 12:48:41.731742118 +0000 UTC m=+1266.903334479" watchObservedRunningTime="2025-11-25 12:48:41.734539236 +0000 UTC m=+1266.906131577"
Nov 25 12:48:51 crc kubenswrapper[4675]: I1125 12:48:51.269118 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused"
Nov 25 12:48:51 crc kubenswrapper[4675]: I1125 12:48:51.375843 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85d4f84f96-fcncp" podUID="412d2040-4c83-4443-989e-cc844466e840" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused"
Nov 25 12:48:58 crc kubenswrapper[4675]: I1125 12:48:58.874620 4675 generic.go:334] "Generic (PLEG): container finished" podID="0babe492-76ca-4609-b35f-e1d613d06078" containerID="67fd9d608c15ce7038ac66ff945d10e638b4dfb0ba706ed14583307038b4ac9d" exitCode=0
Nov 25 12:48:58 crc kubenswrapper[4675]: I1125 12:48:58.874704 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr22c" event={"ID":"0babe492-76ca-4609-b35f-e1d613d06078","Type":"ContainerDied","Data":"67fd9d608c15ce7038ac66ff945d10e638b4dfb0ba706ed14583307038b4ac9d"}
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.231480 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.298277 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle\") pod \"0babe492-76ca-4609-b35f-e1d613d06078\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") "
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.298424 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdfv6\" (UniqueName: \"kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6\") pod \"0babe492-76ca-4609-b35f-e1d613d06078\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") "
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.298468 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts\") pod \"0babe492-76ca-4609-b35f-e1d613d06078\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") "
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.298529 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data\") pod \"0babe492-76ca-4609-b35f-e1d613d06078\" (UID: \"0babe492-76ca-4609-b35f-e1d613d06078\") "
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.303921 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6" (OuterVolumeSpecName: "kube-api-access-xdfv6") pod "0babe492-76ca-4609-b35f-e1d613d06078" (UID: "0babe492-76ca-4609-b35f-e1d613d06078"). InnerVolumeSpecName "kube-api-access-xdfv6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.305958 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts" (OuterVolumeSpecName: "scripts") pod "0babe492-76ca-4609-b35f-e1d613d06078" (UID: "0babe492-76ca-4609-b35f-e1d613d06078"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.326028 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0babe492-76ca-4609-b35f-e1d613d06078" (UID: "0babe492-76ca-4609-b35f-e1d613d06078"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.330330 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data" (OuterVolumeSpecName: "config-data") pod "0babe492-76ca-4609-b35f-e1d613d06078" (UID: "0babe492-76ca-4609-b35f-e1d613d06078"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.410847 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdfv6\" (UniqueName: \"kubernetes.io/projected/0babe492-76ca-4609-b35f-e1d613d06078-kube-api-access-xdfv6\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.410888 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.410909 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.410923 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0babe492-76ca-4609-b35f-e1d613d06078-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.895071 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr22c" event={"ID":"0babe492-76ca-4609-b35f-e1d613d06078","Type":"ContainerDied","Data":"3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2"}
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.895414 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fa1aad2792d602bb309a811086127c22689d8d6eb0fe3608efbae0122f3baf2"
Nov 25 12:49:00 crc kubenswrapper[4675]: I1125 12:49:00.895491 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr22c"
Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.018982 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 12:49:01 crc kubenswrapper[4675]: E1125 12:49:01.019348 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0babe492-76ca-4609-b35f-e1d613d06078" containerName="nova-cell0-conductor-db-sync"
Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.019364 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0babe492-76ca-4609-b35f-e1d613d06078" containerName="nova-cell0-conductor-db-sync"
Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.019545 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="0babe492-76ca-4609-b35f-e1d613d06078" containerName="nova-cell0-conductor-db-sync"
Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.020267 4675 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.022429 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.025347 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-px4x4" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.034427 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.122721 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sh9q\" (UniqueName: \"kubernetes.io/projected/98ffb1d8-055d-41be-8c54-7282e6e1c36d-kube-api-access-9sh9q\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.122765 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.122948 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.224537 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sh9q\" (UniqueName: \"kubernetes.io/projected/98ffb1d8-055d-41be-8c54-7282e6e1c36d-kube-api-access-9sh9q\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.224595 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.224677 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.228597 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.228696 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98ffb1d8-055d-41be-8c54-7282e6e1c36d-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.243608 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sh9q\" (UniqueName: \"kubernetes.io/projected/98ffb1d8-055d-41be-8c54-7282e6e1c36d-kube-api-access-9sh9q\") pod \"nova-cell0-conductor-0\" (UID: \"98ffb1d8-055d-41be-8c54-7282e6e1c36d\") " pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.340787 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.801551 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 12:49:01 crc kubenswrapper[4675]: I1125 12:49:01.903473 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"98ffb1d8-055d-41be-8c54-7282e6e1c36d","Type":"ContainerStarted","Data":"9d71f17d2ff1b8916da6512c15d97181d8be630e4f14638dc52c79acabb2df2d"} Nov 25 12:49:02 crc kubenswrapper[4675]: I1125 12:49:02.913856 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"98ffb1d8-055d-41be-8c54-7282e6e1c36d","Type":"ContainerStarted","Data":"dcc0a9056876e62e164ab2aded3f913944339271c785b601f0095f13890dec93"} Nov 25 12:49:02 crc kubenswrapper[4675]: I1125 12:49:02.914579 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:02 crc kubenswrapper[4675]: I1125 12:49:02.937873 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.937853584 podStartE2EDuration="2.937853584s" podCreationTimestamp="2025-11-25 12:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:02.928616223 +0000 UTC m=+1288.100208574" watchObservedRunningTime="2025-11-25 12:49:02.937853584 +0000 UTC m=+1288.109445935" Nov 25 12:49:03 crc kubenswrapper[4675]: I1125 12:49:03.034084 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 12:49:03 crc kubenswrapper[4675]: I1125 12:49:03.902058 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:49:03 crc kubenswrapper[4675]: I1125 12:49:03.924113 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:49:05 crc kubenswrapper[4675]: I1125 12:49:05.964456 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.114196 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-85d4f84f96-fcncp" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.203953 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.379890 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.873269 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-mlk8r"] Nov 25 12:49:06 
crc kubenswrapper[4675]: I1125 12:49:06.874685 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.879976 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.879976 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.901218 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-mlk8r"] Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.957659 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon-log" containerID="cri-o://70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6" gracePeriod=30 Nov 25 12:49:06 crc kubenswrapper[4675]: I1125 12:49:06.957794 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" containerID="cri-o://83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09" gracePeriod=30 Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.060995 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.061049 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.061108 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.061181 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkj25\" (UniqueName: \"kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.105058 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.106646 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.115021 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.139975 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.165093 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.165168 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.165242 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.165334 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkj25\" (UniqueName: \"kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.171435 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.182929 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.187594 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkj25\" (UniqueName: \"kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.213491 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts\") pod \"nova-cell0-cell-mapping-mlk8r\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.268995 4675 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.269117 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.269140 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fcss\" (UniqueName: \"kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.297735 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.306027 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.315112 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.324273 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.373503 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.373541 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fcss\" (UniqueName: \"kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.376486 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.399096 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.399181 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.415150 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fcss\" (UniqueName: \"kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss\") pod \"nova-cell1-novncproxy-0\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.431078 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.432598 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.435146 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.442804 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.448872 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.478910 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.479015 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29qzs\" (UniqueName: \"kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.479078 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.496214 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.566073 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.568849 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.578284 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582011 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582084 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582105 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582128 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l252\" (UniqueName: \"kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582239 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29qzs\" (UniqueName: \"kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582313 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.582344 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.597687 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.604660 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc 
kubenswrapper[4675]: I1125 12:49:07.618230 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.641451 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29qzs\" (UniqueName: \"kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs\") pod \"nova-scheduler-0\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") " pod="openstack/nova-scheduler-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689575 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4svk\" (UniqueName: \"kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689621 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689714 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689779 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689877 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689913 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689931 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.689965 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l252\" (UniqueName: \"kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.692911 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.705545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.709107 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.727024 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l252\" (UniqueName: \"kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252\") pod \"nova-api-0\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") " pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.793091 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.793443 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4svk\" (UniqueName: \"kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.793463 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.793511 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.794113 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.800765 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.812486 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.830490 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4svk\" (UniqueName: \"kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk\") pod \"nova-metadata-0\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.832383 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.834248 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.867585 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.891526 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.894904 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.894956 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlrlc\" (UniqueName: \"kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.894976 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.895004 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.895081 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.895258 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config\") pod 
\"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.905182 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:07 crc kubenswrapper[4675]: I1125 12:49:07.929885 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:07.999850 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:07.999972 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlrlc\" (UniqueName: \"kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:07.999994 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.000034 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.000065 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.000193 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.000923 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.001423 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc 
kubenswrapper[4675]: I1125 12:49:08.001679 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.002215 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.002227 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.028617 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlrlc\" (UniqueName: \"kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc\") pod \"dnsmasq-dns-757b4f8459-x8wzk\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.168542 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.346880 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.538623 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-mlk8r"] Nov 25 12:49:08 crc kubenswrapper[4675]: W1125 12:49:08.743995 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfeec8b61_9d90_4dd5_9261_e4f88ddaadfc.slice/crio-f93ea3bf760231a4281592775171bfed8a5104f7add5abd2b637753d8094fbd4 WatchSource:0}: Error finding container f93ea3bf760231a4281592775171bfed8a5104f7add5abd2b637753d8094fbd4: Status 404 returned error can't find the container with id f93ea3bf760231a4281592775171bfed8a5104f7add5abd2b637753d8094fbd4 Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.751182 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.782703 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.913933 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:49:08 crc kubenswrapper[4675]: I1125 12:49:08.928726 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:49:08 crc kubenswrapper[4675]: W1125 12:49:08.938953 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f773f1d_b032_4e64_ba15_1ec5cac38caa.slice/crio-cf587ef8b11f4d40e71e896ac41a72d68079eee27c59ac3897b2c719cb38abc5 WatchSource:0}: Error finding container 
cf587ef8b11f4d40e71e896ac41a72d68079eee27c59ac3897b2c719cb38abc5: Status 404 returned error can't find the container with id cf587ef8b11f4d40e71e896ac41a72d68079eee27c59ac3897b2c719cb38abc5 Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.022100 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerStarted","Data":"7183cab6eff37436b227b05f73d2f2a6d7289f53274c4937343f266f8b8ec3b0"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.029380 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerStarted","Data":"f93ea3bf760231a4281592775171bfed8a5104f7add5abd2b637753d8094fbd4"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.034273 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-mlk8r" event={"ID":"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2","Type":"ContainerStarted","Data":"a91bba163fe4f08f11d7cd7c7301ac2cde5f3ac384d3b5e3ec5968003e5069c1"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.034536 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-mlk8r" event={"ID":"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2","Type":"ContainerStarted","Data":"1f43d638f10d5612022429570e11004a7e5ab0ba37ae8558624228ee3cf7539a"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.036276 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" event={"ID":"8f773f1d-b032-4e64-ba15-1ec5cac38caa","Type":"ContainerStarted","Data":"cf587ef8b11f4d40e71e896ac41a72d68079eee27c59ac3897b2c719cb38abc5"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.040644 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e3d1f9da-2700-492f-9ad2-19f0053f4b0a","Type":"ContainerStarted","Data":"2a2117e92bce6c902441f96bf12751a96a15c5f109985e59bf074850402a75b9"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.047691 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4fs77"] Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.049455 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cc029e03-720d-491c-90e4-bf8f69a922e1","Type":"ContainerStarted","Data":"b3327ce8d627cbc500b379ec8e26875b3ffff6892521b2c9ded7560b9b436767"} Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.049557 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.055278 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.055651 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.138849 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.138893 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.138966 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rpml\" (UniqueName: \"kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.139081 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.174332 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-mlk8r" podStartSLOduration=3.174311957 podStartE2EDuration="3.174311957s" podCreationTimestamp="2025-11-25 12:49:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:09.079636451 +0000 UTC m=+1294.251228792" watchObservedRunningTime="2025-11-25 12:49:09.174311957 +0000 UTC m=+1294.345904288"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.175767 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4fs77"]
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.241553 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rpml\" (UniqueName: \"kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.243128 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.243430 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.243659 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.249327 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.250096 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.251781 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.263665 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.264025 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" containerName="kube-state-metrics" containerID="cri-o://72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615" gracePeriod=30
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.269381 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rpml\" (UniqueName: \"kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml\") pod \"nova-cell1-conductor-db-sync-4fs77\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.434221 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4fs77"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.819569 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.862306 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gb8x\" (UniqueName: \"kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x\") pod \"f8d262fe-fd03-43eb-a9d8-fb43896cf021\" (UID: \"f8d262fe-fd03-43eb-a9d8-fb43896cf021\") "
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.870525 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x" (OuterVolumeSpecName: "kube-api-access-5gb8x") pod "f8d262fe-fd03-43eb-a9d8-fb43896cf021" (UID: "f8d262fe-fd03-43eb-a9d8-fb43896cf021"). InnerVolumeSpecName "kube-api-access-5gb8x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:09 crc kubenswrapper[4675]: I1125 12:49:09.964103 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gb8x\" (UniqueName: \"kubernetes.io/projected/f8d262fe-fd03-43eb-a9d8-fb43896cf021-kube-api-access-5gb8x\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.061981 4675 generic.go:334] "Generic (PLEG): container finished" podID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerID="c9827326323f7a253eedffc9cf8b0bae0ab55665abebcd901aaa3c1b6a386eb6" exitCode=0
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.062031 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" event={"ID":"8f773f1d-b032-4e64-ba15-1ec5cac38caa","Type":"ContainerDied","Data":"c9827326323f7a253eedffc9cf8b0bae0ab55665abebcd901aaa3c1b6a386eb6"}
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.102352 4675 generic.go:334] "Generic (PLEG): container finished" podID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" containerID="72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615" exitCode=2
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.102619 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.103110 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8d262fe-fd03-43eb-a9d8-fb43896cf021","Type":"ContainerDied","Data":"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"}
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.103138 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8d262fe-fd03-43eb-a9d8-fb43896cf021","Type":"ContainerDied","Data":"a6c1281775c392defb4b452dec72494e5eab487fd72d60a75f740a06b75c11d3"}
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.103154 4675 scope.go:117] "RemoveContainer" containerID="72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.149987 4675 scope.go:117] "RemoveContainer" containerID="72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.164646 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.166346 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:10 crc kubenswrapper[4675]: E1125 12:49:10.170665 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615\": container with ID starting with 72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615 not found: ID does not exist" containerID="72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.170942 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615"} err="failed to get container status \"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615\": rpc error: code = NotFound desc = could not find container \"72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615\": container with ID starting with 72329a76471006f9fa3ced4154e46da2bd876f01c50c7d511cdf72e6de3bb615 not found: ID does not exist"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.219675 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:10 crc kubenswrapper[4675]: E1125 12:49:10.220088 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" containerName="kube-state-metrics"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.220105 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" containerName="kube-state-metrics"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.220294 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" containerName="kube-state-metrics"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.224502 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.230442 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.230673 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.257145 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.298207 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4fs77"]
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.376459 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.376562 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzz2r\" (UniqueName: \"kubernetes.io/projected/76b55738-1ee0-41a4-950a-faa08432f67f-kube-api-access-wzz2r\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.376626 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.376651 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.479269 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzz2r\" (UniqueName: \"kubernetes.io/projected/76b55738-1ee0-41a4-950a-faa08432f67f-kube-api-access-wzz2r\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.479372 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.479403 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.479513 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.531784 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.533552 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.539832 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzz2r\" (UniqueName: \"kubernetes.io/projected/76b55738-1ee0-41a4-950a-faa08432f67f-kube-api-access-wzz2r\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.546725 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/76b55738-1ee0-41a4-950a-faa08432f67f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"76b55738-1ee0-41a4-950a-faa08432f67f\") " pod="openstack/kube-state-metrics-0"
Nov 25 12:49:10 crc kubenswrapper[4675]: I1125 12:49:10.573581 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.118300 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4fs77" event={"ID":"06e0ca7d-c361-435c-90ac-29ff3e601751","Type":"ContainerStarted","Data":"943d5bedc67a845ec3273139aa12561bb2c11c86b87d0f868b4a93a9b954049c"}
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.118833 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4fs77" event={"ID":"06e0ca7d-c361-435c-90ac-29ff3e601751","Type":"ContainerStarted","Data":"d713cdfc6bd4d0cdb41ff64bd5ec85e54d291ba463aaf14fb13eb62715b96db4"}
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.122386 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" event={"ID":"8f773f1d-b032-4e64-ba15-1ec5cac38caa","Type":"ContainerStarted","Data":"0c4a5408da975bda9a1274fe30bb8adffcc858722b1da0f099ef3e83b4754176"}
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.123329 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk"
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.136745 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-4fs77" podStartSLOduration=2.136724244 podStartE2EDuration="2.136724244s" podCreationTimestamp="2025-11-25 12:49:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:11.131991645 +0000 UTC m=+1296.303583996" watchObservedRunningTime="2025-11-25 12:49:11.136724244 +0000 UTC m=+1296.308316585"
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.155887 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" podStartSLOduration=4.155870668 podStartE2EDuration="4.155870668s" podCreationTimestamp="2025-11-25 12:49:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:11.151908213 +0000 UTC m=+1296.323500574" watchObservedRunningTime="2025-11-25 12:49:11.155870668 +0000 UTC m=+1296.327463009"
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.442857 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.453313 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.469305 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:52658->10.217.0.148:8443: read: connection reset by peer"
Nov 25 12:49:11 crc kubenswrapper[4675]: I1125 12:49:11.549444 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8d262fe-fd03-43eb-a9d8-fb43896cf021" path="/var/lib/kubelet/pods/f8d262fe-fd03-43eb-a9d8-fb43896cf021/volumes"
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.137702 4675 generic.go:334] "Generic (PLEG): container finished" podID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerID="83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09" exitCode=0
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.138440 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerDied","Data":"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09"}
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.138520 4675 scope.go:117] "RemoveContainer" containerID="5895893fb049f53c9c3d5b457ebc6cc8920b7a0658b046ead35f78944694da4f"
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.560722 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.561543 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-central-agent" containerID="cri-o://fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100" gracePeriod=30
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.561995 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="proxy-httpd" containerID="cri-o://fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7" gracePeriod=30
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.562052 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="sg-core" containerID="cri-o://7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818" gracePeriod=30
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.562086 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-notification-agent" containerID="cri-o://41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3" gracePeriod=30
Nov 25 12:49:12 crc kubenswrapper[4675]: I1125 12:49:12.785868 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.153571 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerID="fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7" exitCode=0
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.153598 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerID="7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818" exitCode=2
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.153606 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerID="fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100" exitCode=0
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.154263 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerDied","Data":"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"}
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.154291 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerDied","Data":"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"}
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.154301 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerDied","Data":"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"}
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.661935 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 12:49:13 crc kubenswrapper[4675]: I1125 12:49:13.662905 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.179939 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.187145 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerID="41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3" exitCode=0
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.187432 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerDied","Data":"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"}
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.187462 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fb59010-be01-41ed-8fb5-c2754f035e18","Type":"ContainerDied","Data":"ca1d73df2e6f160a63cc4ff4fe5e79f9e0bc2ba8b83cb703b0e414a179018ceb"}
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.187484 4675 scope.go:117] "RemoveContainer" containerID="fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.194058 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerStarted","Data":"e003d0a4893eb7509b361b54a130151020eab3e65d48985e9b8bbe0cce7e63ef"}
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.252765 4675 scope.go:117] "RemoveContainer" containerID="7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.269861 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.269968 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.270033 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.270134 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.270178 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.270203 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tb9q4\" (UniqueName: \"kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.270237 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data\") pod \"6fb59010-be01-41ed-8fb5-c2754f035e18\" (UID: \"6fb59010-be01-41ed-8fb5-c2754f035e18\") "
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.272126 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.272338 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.288277 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4" (OuterVolumeSpecName: "kube-api-access-tb9q4") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "kube-api-access-tb9q4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.289708 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts" (OuterVolumeSpecName: "scripts") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.335786 4675 scope.go:117] "RemoveContainer" containerID="41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.365613 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.375177 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.375203 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.375213 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.375221 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fb59010-be01-41ed-8fb5-c2754f035e18-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.375228 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tb9q4\" (UniqueName: \"kubernetes.io/projected/6fb59010-be01-41ed-8fb5-c2754f035e18-kube-api-access-tb9q4\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.410410 4675 scope.go:117] "RemoveContainer" containerID="fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.441557 4675 scope.go:117] "RemoveContainer" containerID="fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"
Nov 25 12:49:14 crc kubenswrapper[4675]: E1125 12:49:14.442094 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7\": container with ID starting with fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7 not found: ID does not exist" containerID="fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.442122 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7"} err="failed to get container status \"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7\": rpc error: code = NotFound desc = could not find container \"fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7\": container with ID starting with fc97ed5730a6e8b8d7b77f4fb42624c60e09148c3d19f7331f6f006ea66d62a7 not found: ID does not exist"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.442142 4675 scope.go:117] "RemoveContainer" containerID="7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"
Nov 25 12:49:14 crc kubenswrapper[4675]: E1125 12:49:14.448057 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818\": container with ID starting with 7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818 not found: ID does not exist" containerID="7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.448099 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818"} err="failed to get container status \"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818\": rpc error: code = NotFound desc = could not find container \"7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818\": container with ID starting with 7dae4a7ef21253a40717e3bad0ed65f99f5dd5bf69e93421183be76c59810818 not found: ID does not exist"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.448124 4675 scope.go:117] "RemoveContainer" containerID="41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"
Nov 25 12:49:14 crc kubenswrapper[4675]: E1125 12:49:14.448551 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3\": container with ID starting with 41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3 not found: ID does not exist" containerID="41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.448592 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3"} err="failed to get container status \"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3\": rpc error: code = NotFound desc = could not find container \"41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3\": container with ID starting with 41e2dac74c20fd7224846296b7035c80c2a55d664ceabdb2459a4aadaca838f3 not found: ID does not exist"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.448608 4675 scope.go:117] "RemoveContainer" containerID="fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"
Nov 25 12:49:14 crc kubenswrapper[4675]: E1125 12:49:14.448853 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100\": container with ID starting with fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100 not found: ID does not exist" containerID="fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.448876 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100"} err="failed to get container status \"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100\": rpc error: code = NotFound desc = could not find container \"fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100\": container with ID starting with fa104043e4cb995efd5c3bc70e9f8ebc9af5740c22fad93192d4889299b06100 not found: ID does not exist"
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.695661 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.764728 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data" (OuterVolumeSpecName: "config-data") pod "6fb59010-be01-41ed-8fb5-c2754f035e18" (UID: "6fb59010-be01-41ed-8fb5-c2754f035e18"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.785446 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:14 crc kubenswrapper[4675]: I1125 12:49:14.785476 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fb59010-be01-41ed-8fb5-c2754f035e18-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.227552 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerStarted","Data":"3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.227643 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.230624 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e3d1f9da-2700-492f-9ad2-19f0053f4b0a","Type":"ContainerStarted","Data":"0580d1a2296c7169ae63e2bd56dd2f1f15e2619963e6bfc19ca4209e88b9ffb7"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.230792 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://0580d1a2296c7169ae63e2bd56dd2f1f15e2619963e6bfc19ca4209e88b9ffb7" gracePeriod=30
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.246003 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cc029e03-720d-491c-90e4-bf8f69a922e1","Type":"ContainerStarted","Data":"96350c383836fcebd98a3a0d8ebe49daf6fb1fc5f5fbd734349db1bf8fa70554"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.254638 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=4.556266889 podStartE2EDuration="5.254614426s" podCreationTimestamp="2025-11-25 12:49:10 +0000 UTC" firstStartedPulling="2025-11-25 12:49:13.743154783 +0000 UTC m=+1298.914747124" lastFinishedPulling="2025-11-25 12:49:14.44150232 +0000 UTC m=+1299.613094661" observedRunningTime="2025-11-25 12:49:15.251417275 +0000 UTC m=+1300.423009616" watchObservedRunningTime="2025-11-25 12:49:15.254614426 +0000 UTC m=+1300.426206777"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.273271 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerStarted","Data":"3cc678ea005806705954ecf5b2d5305193a6ff1789bbab93ff8e6ece55197c02"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.282908 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerStarted","Data":"6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.282942 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-log" containerID="cri-o://773690c613c302bf324bbabd5e6ddc3589bc4c651edbd482aba15d21bb635bf6" gracePeriod=30
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.283027 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-metadata" containerID="cri-o://6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4" gracePeriod=30
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.282960 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerStarted","Data":"773690c613c302bf324bbabd5e6ddc3589bc4c651edbd482aba15d21bb635bf6"}
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.287122 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.289472 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.213253967 podStartE2EDuration="8.289454405s" podCreationTimestamp="2025-11-25 12:49:07 +0000 UTC" firstStartedPulling="2025-11-25 12:49:08.932253043 +0000 UTC m=+1294.103845384" lastFinishedPulling="2025-11-25 12:49:14.008453481 +0000 UTC m=+1299.180045822" observedRunningTime="2025-11-25 12:49:15.281840045 +0000 UTC m=+1300.453432386" watchObservedRunningTime="2025-11-25 12:49:15.289454405 +0000 UTC m=+1300.461046746"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.335946 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.5908965779999997 podStartE2EDuration="8.335921061s" podCreationTimestamp="2025-11-25 12:49:07 +0000 UTC" firstStartedPulling="2025-11-25 12:49:08.385009742 +0000 UTC m=+1293.556602083" lastFinishedPulling="2025-11-25 12:49:14.130034225 +0000 UTC m=+1299.301626566" observedRunningTime="2025-11-25 12:49:15.319306156 +0000 UTC m=+1300.490898517" watchObservedRunningTime="2025-11-25 12:49:15.335921061 +0000 UTC m=+1300.507513422"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.370892 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.989060245 podStartE2EDuration="8.370873273s" podCreationTimestamp="2025-11-25 12:49:07 +0000 UTC" firstStartedPulling="2025-11-25 12:49:08.75717281 +0000 UTC m=+1293.928765151" lastFinishedPulling="2025-11-25 12:49:14.138985838 +0000 UTC m=+1299.310578179" observedRunningTime="2025-11-25 12:49:15.370237842 +0000 UTC m=+1300.541830203" watchObservedRunningTime="2025-11-25 12:49:15.370873273 +0000 UTC m=+1300.542465634"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.448240 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.517083 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.630916 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" path="/var/lib/kubelet/pods/6fb59010-be01-41ed-8fb5-c2754f035e18/volumes"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.632152 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:15 crc kubenswrapper[4675]: E1125 12:49:15.632563 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-notification-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.632655 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-notification-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: E1125 12:49:15.632745 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="proxy-httpd"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.632832 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="proxy-httpd"
Nov 25 12:49:15 crc kubenswrapper[4675]: E1125 12:49:15.632924 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-central-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.633002 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-central-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: E1125 12:49:15.633084 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="sg-core"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.633321 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="sg-core"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.633624 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="sg-core"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.633721 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="proxy-httpd"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.634874 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-central-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.634969 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb59010-be01-41ed-8fb5-c2754f035e18" containerName="ceilometer-notification-agent"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.637202 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.637501 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.644251 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.644478 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.644706 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.767194 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5nbk\" (UniqueName: \"kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.767948 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.768117 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.768241 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.768351 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.768454 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.768926 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.769001 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871123 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871406 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871584 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5nbk\" (UniqueName: \"kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871667 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871769 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871900 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.871985 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.872082 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.873437 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.873972 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.883752 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.885761 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.887460 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.890426 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.891015 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:15 crc kubenswrapper[4675]: I1125 12:49:15.908497 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5nbk\" (UniqueName: \"kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk\") pod \"ceilometer-0\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " pod="openstack/ceilometer-0"
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.012154 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.316727 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerStarted","Data":"579d9ed690ea19da6834ee9cb48d83c16009a3aaac4800a01ed932ada41c35da"}
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.321440 4675 generic.go:334] "Generic (PLEG): container finished" podID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerID="773690c613c302bf324bbabd5e6ddc3589bc4c651edbd482aba15d21bb635bf6" exitCode=143
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.322633 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerDied","Data":"773690c613c302bf324bbabd5e6ddc3589bc4c651edbd482aba15d21bb635bf6"}
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.360222 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.928425145 podStartE2EDuration="9.360207518s" podCreationTimestamp="2025-11-25 12:49:07 +0000 UTC" firstStartedPulling="2025-11-25 12:49:08.787639342 +0000 UTC m=+1293.959231683" lastFinishedPulling="2025-11-25 12:49:14.219421715 +0000 UTC m=+1299.391014056" observedRunningTime="2025-11-25 12:49:16.358457232 +0000 UTC m=+1301.530049573" watchObservedRunningTime="2025-11-25 12:49:16.360207518 +0000 UTC m=+1301.531799859"
Nov 25 12:49:16 crc kubenswrapper[4675]: I1125 12:49:16.670438 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.336717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerStarted","Data":"5676abeb6f5471522beb6de720b7627b2ca7d26b11643406f287345cb258e0a1"}
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.443804 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.868779 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.869127 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.905959 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.906006 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.930450 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.930501 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 12:49:17 crc kubenswrapper[4675]: I1125 12:49:17.962746 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.172997 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk"
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.223754 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"]
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.223975 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="dnsmasq-dns" containerID="cri-o://7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30" gracePeriod=10
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.361099 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerStarted","Data":"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf"}
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.436187 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.953991 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:49:18 crc kubenswrapper[4675]: I1125 12:49:18.954252 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.071798 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r"
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.155801 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.155875 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.156011 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.156077 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.156116 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdv2r\" (UniqueName: \"kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.156177 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0\") pod \"12d2f6df-259a-49af-a8a7-df66608bd255\" (UID: \"12d2f6df-259a-49af-a8a7-df66608bd255\") "
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.193072 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r" (OuterVolumeSpecName: "kube-api-access-wdv2r") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "kube-api-access-wdv2r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.257744 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdv2r\" (UniqueName: \"kubernetes.io/projected/12d2f6df-259a-49af-a8a7-df66608bd255-kube-api-access-wdv2r\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.290414 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.299232 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.304118 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.310853 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.316998 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config" (OuterVolumeSpecName: "config") pod "12d2f6df-259a-49af-a8a7-df66608bd255" (UID: "12d2f6df-259a-49af-a8a7-df66608bd255"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.362072 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.362104 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.362114 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.362123 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.362131 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12d2f6df-259a-49af-a8a7-df66608bd255-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.380747 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerStarted","Data":"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819"}
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.392437 4675 generic.go:334] "Generic (PLEG): container finished" podID="12d2f6df-259a-49af-a8a7-df66608bd255" containerID="7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30" exitCode=0
Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.392835 4675 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.393526 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" event={"ID":"12d2f6df-259a-49af-a8a7-df66608bd255","Type":"ContainerDied","Data":"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30"} Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.393564 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-rfj6r" event={"ID":"12d2f6df-259a-49af-a8a7-df66608bd255","Type":"ContainerDied","Data":"67995dbc1ce6f4f0a25bdf36ac726793fd68ff887269a3f9f406f1b12cab3a7f"} Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.393585 4675 scope.go:117] "RemoveContainer" containerID="7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.442485 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"] Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.444937 4675 scope.go:117] "RemoveContainer" containerID="540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.462913 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-rfj6r"] Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.478109 4675 scope.go:117] "RemoveContainer" containerID="7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30" Nov 25 12:49:19 crc kubenswrapper[4675]: E1125 12:49:19.478527 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30\": container with ID starting with 7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30 not found: ID does not exist" containerID="7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.478557 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30"} err="failed to get container status \"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30\": rpc error: code = NotFound desc = could not find container \"7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30\": container with ID starting with 7fcc1446262a22ebb81f801b4485c564c0de3fecf707bf3a3183017c197a7c30 not found: ID does not exist" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.478584 4675 scope.go:117] "RemoveContainer" containerID="540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c" Nov 25 12:49:19 crc kubenswrapper[4675]: E1125 12:49:19.478931 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c\": container with ID starting with 540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c not found: ID does not exist" containerID="540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.478950 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c"} err="failed to get container status 
\"540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c\": rpc error: code = NotFound desc = could not find container \"540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c\": container with ID starting with 540db77c64fe74a5c3460e1eb33164a7dad5fde0478820e56caecad533f2214c not found: ID does not exist" Nov 25 12:49:19 crc kubenswrapper[4675]: I1125 12:49:19.543216 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" path="/var/lib/kubelet/pods/12d2f6df-259a-49af-a8a7-df66608bd255/volumes" Nov 25 12:49:20 crc kubenswrapper[4675]: I1125 12:49:20.415524 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerStarted","Data":"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610"} Nov 25 12:49:20 crc kubenswrapper[4675]: I1125 12:49:20.585058 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 12:49:21 crc kubenswrapper[4675]: I1125 12:49:21.255083 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 12:49:22 crc kubenswrapper[4675]: I1125 12:49:22.472765 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerStarted","Data":"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb"} Nov 25 12:49:22 crc kubenswrapper[4675]: I1125 12:49:22.473285 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 12:49:22 crc kubenswrapper[4675]: I1125 12:49:22.498885 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.069524021 podStartE2EDuration="7.498862786s" podCreationTimestamp="2025-11-25 12:49:15 +0000 UTC" firstStartedPulling="2025-11-25 12:49:16.730949171 +0000 UTC m=+1301.902541512" lastFinishedPulling="2025-11-25 12:49:21.160287936 +0000 UTC m=+1306.331880277" observedRunningTime="2025-11-25 12:49:22.494938602 +0000 UTC m=+1307.666530953" watchObservedRunningTime="2025-11-25 12:49:22.498862786 +0000 UTC m=+1307.670455137" Nov 25 12:49:23 crc kubenswrapper[4675]: I1125 12:49:23.483647 4675 generic.go:334] "Generic (PLEG): container finished" podID="ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" containerID="a91bba163fe4f08f11d7cd7c7301ac2cde5f3ac384d3b5e3ec5968003e5069c1" exitCode=0 Nov 25 12:49:23 crc kubenswrapper[4675]: I1125 12:49:23.483727 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-mlk8r" event={"ID":"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2","Type":"ContainerDied","Data":"a91bba163fe4f08f11d7cd7c7301ac2cde5f3ac384d3b5e3ec5968003e5069c1"} Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.524515 4675 generic.go:334] "Generic (PLEG): container finished" podID="06e0ca7d-c361-435c-90ac-29ff3e601751" containerID="943d5bedc67a845ec3273139aa12561bb2c11c86b87d0f868b4a93a9b954049c" exitCode=0 Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.524561 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4fs77" 
event={"ID":"06e0ca7d-c361-435c-90ac-29ff3e601751","Type":"ContainerDied","Data":"943d5bedc67a845ec3273139aa12561bb2c11c86b87d0f868b4a93a9b954049c"} Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.895233 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.981468 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts\") pod \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.981900 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkj25\" (UniqueName: \"kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25\") pod \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.981970 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle\") pod \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.982011 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data\") pod \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\" (UID: \"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2\") " Nov 25 12:49:24 crc kubenswrapper[4675]: I1125 12:49:24.992429 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25" (OuterVolumeSpecName: "kube-api-access-xkj25") pod "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" (UID: "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2"). InnerVolumeSpecName "kube-api-access-xkj25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.001992 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts" (OuterVolumeSpecName: "scripts") pod "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" (UID: "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.013728 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" (UID: "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.020909 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data" (OuterVolumeSpecName: "config-data") pod "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" (UID: "ab7f6527-92c4-48fb-b187-bb9dc5e6aac2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.084317 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.084351 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.084360 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.084368 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkj25\" (UniqueName: \"kubernetes.io/projected/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2-kube-api-access-xkj25\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.562458 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-mlk8r" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.588997 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-mlk8r" event={"ID":"ab7f6527-92c4-48fb-b187-bb9dc5e6aac2","Type":"ContainerDied","Data":"1f43d638f10d5612022429570e11004a7e5ab0ba37ae8558624228ee3cf7539a"} Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.589036 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f43d638f10d5612022429570e11004a7e5ab0ba37ae8558624228ee3cf7539a" Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.717591 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.717827 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-log" containerID="cri-o://3cc678ea005806705954ecf5b2d5305193a6ff1789bbab93ff8e6ece55197c02" gracePeriod=30 Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.718269 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-api" containerID="cri-o://579d9ed690ea19da6834ee9cb48d83c16009a3aaac4800a01ed932ada41c35da" gracePeriod=30 Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.756688 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.757146 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="cc029e03-720d-491c-90e4-bf8f69a922e1" containerName="nova-scheduler-scheduler" containerID="cri-o://96350c383836fcebd98a3a0d8ebe49daf6fb1fc5f5fbd734349db1bf8fa70554" gracePeriod=30 Nov 25 12:49:25 crc kubenswrapper[4675]: I1125 12:49:25.981491 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4fs77" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.100867 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rpml\" (UniqueName: \"kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml\") pod \"06e0ca7d-c361-435c-90ac-29ff3e601751\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.100947 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle\") pod \"06e0ca7d-c361-435c-90ac-29ff3e601751\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.101073 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data\") pod \"06e0ca7d-c361-435c-90ac-29ff3e601751\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.101122 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts\") pod \"06e0ca7d-c361-435c-90ac-29ff3e601751\" (UID: \"06e0ca7d-c361-435c-90ac-29ff3e601751\") " Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.119330 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml" (OuterVolumeSpecName: "kube-api-access-4rpml") pod "06e0ca7d-c361-435c-90ac-29ff3e601751" (UID: "06e0ca7d-c361-435c-90ac-29ff3e601751"). InnerVolumeSpecName "kube-api-access-4rpml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.123013 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts" (OuterVolumeSpecName: "scripts") pod "06e0ca7d-c361-435c-90ac-29ff3e601751" (UID: "06e0ca7d-c361-435c-90ac-29ff3e601751"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.145082 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data" (OuterVolumeSpecName: "config-data") pod "06e0ca7d-c361-435c-90ac-29ff3e601751" (UID: "06e0ca7d-c361-435c-90ac-29ff3e601751"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.188933 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06e0ca7d-c361-435c-90ac-29ff3e601751" (UID: "06e0ca7d-c361-435c-90ac-29ff3e601751"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.204321 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.204470 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.204530 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rpml\" (UniqueName: \"kubernetes.io/projected/06e0ca7d-c361-435c-90ac-29ff3e601751-kube-api-access-4rpml\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.204591 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e0ca7d-c361-435c-90ac-29ff3e601751-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.561886 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4fs77" event={"ID":"06e0ca7d-c361-435c-90ac-29ff3e601751","Type":"ContainerDied","Data":"d713cdfc6bd4d0cdb41ff64bd5ec85e54d291ba463aaf14fb13eb62715b96db4"} Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.562168 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4fs77" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.562181 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d713cdfc6bd4d0cdb41ff64bd5ec85e54d291ba463aaf14fb13eb62715b96db4" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.563853 4675 generic.go:334] "Generic (PLEG): container finished" podID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerID="3cc678ea005806705954ecf5b2d5305193a6ff1789bbab93ff8e6ece55197c02" exitCode=143 Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.563882 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerDied","Data":"3cc678ea005806705954ecf5b2d5305193a6ff1789bbab93ff8e6ece55197c02"} Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.641866 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 12:49:26 crc kubenswrapper[4675]: E1125 12:49:26.643888 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="dnsmasq-dns" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.643918 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="dnsmasq-dns" Nov 25 12:49:26 crc kubenswrapper[4675]: E1125 12:49:26.643930 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" containerName="nova-manage" Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.643937 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" containerName="nova-manage" Nov 25 12:49:26 crc kubenswrapper[4675]: E1125 12:49:26.643946 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e0ca7d-c361-435c-90ac-29ff3e601751" containerName="nova-cell1-conductor-db-sync" 
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.643953 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e0ca7d-c361-435c-90ac-29ff3e601751" containerName="nova-cell1-conductor-db-sync"
Nov 25 12:49:26 crc kubenswrapper[4675]: E1125 12:49:26.643969 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="init"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.643975 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="init"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.644165 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e0ca7d-c361-435c-90ac-29ff3e601751" containerName="nova-cell1-conductor-db-sync"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.644178 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="12d2f6df-259a-49af-a8a7-df66608bd255" containerName="dnsmasq-dns"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.644191 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" containerName="nova-manage"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.644865 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.652788 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.665826 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.714128 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.714388 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4rcm\" (UniqueName: \"kubernetes.io/projected/02b15b4e-fe03-46fb-9a34-c4e496129490-kube-api-access-m4rcm\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.714516 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.816713 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.816833 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4rcm\" (UniqueName: \"kubernetes.io/projected/02b15b4e-fe03-46fb-9a34-c4e496129490-kube-api-access-m4rcm\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.816900 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.821494 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.822143 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02b15b4e-fe03-46fb-9a34-c4e496129490-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.844453 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4rcm\" (UniqueName: \"kubernetes.io/projected/02b15b4e-fe03-46fb-9a34-c4e496129490-kube-api-access-m4rcm\") pod \"nova-cell1-conductor-0\" (UID: \"02b15b4e-fe03-46fb-9a34-c4e496129490\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:26 crc kubenswrapper[4675]: I1125 12:49:26.976026 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.524279 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.590737 4675 generic.go:334] "Generic (PLEG): container finished" podID="cc029e03-720d-491c-90e4-bf8f69a922e1" containerID="96350c383836fcebd98a3a0d8ebe49daf6fb1fc5f5fbd734349db1bf8fa70554" exitCode=0
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.590781 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cc029e03-720d-491c-90e4-bf8f69a922e1","Type":"ContainerDied","Data":"96350c383836fcebd98a3a0d8ebe49daf6fb1fc5f5fbd734349db1bf8fa70554"}
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.593323 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"02b15b4e-fe03-46fb-9a34-c4e496129490","Type":"ContainerStarted","Data":"c76db3aa0b94fec2999f7e5d4c32000e11638abc6a993efdec1d19ed1ae2b6ef"}
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.697722 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.737371 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29qzs\" (UniqueName: \"kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs\") pod \"cc029e03-720d-491c-90e4-bf8f69a922e1\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") "
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.737490 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data\") pod \"cc029e03-720d-491c-90e4-bf8f69a922e1\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") "
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.737556 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle\") pod \"cc029e03-720d-491c-90e4-bf8f69a922e1\" (UID: \"cc029e03-720d-491c-90e4-bf8f69a922e1\") "
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.744628 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs" (OuterVolumeSpecName: "kube-api-access-29qzs") pod "cc029e03-720d-491c-90e4-bf8f69a922e1" (UID: "cc029e03-720d-491c-90e4-bf8f69a922e1"). InnerVolumeSpecName "kube-api-access-29qzs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.780930 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc029e03-720d-491c-90e4-bf8f69a922e1" (UID: "cc029e03-720d-491c-90e4-bf8f69a922e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.786641 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data" (OuterVolumeSpecName: "config-data") pod "cc029e03-720d-491c-90e4-bf8f69a922e1" (UID: "cc029e03-720d-491c-90e4-bf8f69a922e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.839655 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29qzs\" (UniqueName: \"kubernetes.io/projected/cc029e03-720d-491c-90e4-bf8f69a922e1-kube-api-access-29qzs\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.839957 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:27 crc kubenswrapper[4675]: I1125 12:49:27.839966 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc029e03-720d-491c-90e4-bf8f69a922e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.604863 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cc029e03-720d-491c-90e4-bf8f69a922e1","Type":"ContainerDied","Data":"b3327ce8d627cbc500b379ec8e26875b3ffff6892521b2c9ded7560b9b436767"}
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.604912 4675 scope.go:117] "RemoveContainer" containerID="96350c383836fcebd98a3a0d8ebe49daf6fb1fc5f5fbd734349db1bf8fa70554"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.605069 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.607957 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"02b15b4e-fe03-46fb-9a34-c4e496129490","Type":"ContainerStarted","Data":"93227cbc966e159d3407d8c5bf941eeef7b3f75532e63d173cfdbb9449f2cf20"}
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.608803 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.640603 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.640586431 podStartE2EDuration="2.640586431s" podCreationTimestamp="2025-11-25 12:49:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:28.638674251 +0000 UTC m=+1313.810266592" watchObservedRunningTime="2025-11-25 12:49:28.640586431 +0000 UTC m=+1313.812178762"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.708560 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.739704 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.751597 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:49:28 crc kubenswrapper[4675]: E1125 12:49:28.752093 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc029e03-720d-491c-90e4-bf8f69a922e1" containerName="nova-scheduler-scheduler"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.752137 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc029e03-720d-491c-90e4-bf8f69a922e1" containerName="nova-scheduler-scheduler"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.752495 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc029e03-720d-491c-90e4-bf8f69a922e1" containerName="nova-scheduler-scheduler"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.753259 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.756295 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.762500 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.862291 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.862334 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.862678 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dwpt\" (UniqueName: \"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.964233 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.964272 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.964367 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dwpt\" (UniqueName: \"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.968330 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.968383 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:28 crc kubenswrapper[4675]: I1125 12:49:28.983991 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dwpt\" (UniqueName: \"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt\") pod \"nova-scheduler-0\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " pod="openstack/nova-scheduler-0"
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.074539 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.550461 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc029e03-720d-491c-90e4-bf8f69a922e1" path="/var/lib/kubelet/pods/cc029e03-720d-491c-90e4-bf8f69a922e1/volumes"
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.628203 4675 generic.go:334] "Generic (PLEG): container finished" podID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerID="579d9ed690ea19da6834ee9cb48d83c16009a3aaac4800a01ed932ada41c35da" exitCode=0
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.628282 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerDied","Data":"579d9ed690ea19da6834ee9cb48d83c16009a3aaac4800a01ed932ada41c35da"}
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.661659 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.688307 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.779556 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l252\" (UniqueName: \"kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252\") pod \"2ac77a77-f11e-4956-9a0d-5189412ccee4\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") "
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.780325 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle\") pod \"2ac77a77-f11e-4956-9a0d-5189412ccee4\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") "
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.780364 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data\") pod \"2ac77a77-f11e-4956-9a0d-5189412ccee4\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") "
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.780387 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs\") pod \"2ac77a77-f11e-4956-9a0d-5189412ccee4\" (UID: \"2ac77a77-f11e-4956-9a0d-5189412ccee4\") "
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.780841 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs" (OuterVolumeSpecName: "logs") pod "2ac77a77-f11e-4956-9a0d-5189412ccee4" (UID: "2ac77a77-f11e-4956-9a0d-5189412ccee4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.787034 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252" (OuterVolumeSpecName: "kube-api-access-4l252") pod "2ac77a77-f11e-4956-9a0d-5189412ccee4" (UID: "2ac77a77-f11e-4956-9a0d-5189412ccee4"). InnerVolumeSpecName "kube-api-access-4l252". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.812203 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ac77a77-f11e-4956-9a0d-5189412ccee4" (UID: "2ac77a77-f11e-4956-9a0d-5189412ccee4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.812981 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data" (OuterVolumeSpecName: "config-data") pod "2ac77a77-f11e-4956-9a0d-5189412ccee4" (UID: "2ac77a77-f11e-4956-9a0d-5189412ccee4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.883029 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l252\" (UniqueName: \"kubernetes.io/projected/2ac77a77-f11e-4956-9a0d-5189412ccee4-kube-api-access-4l252\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.883074 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.883088 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac77a77-f11e-4956-9a0d-5189412ccee4-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:29 crc kubenswrapper[4675]: I1125 12:49:29.883100 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac77a77-f11e-4956-9a0d-5189412ccee4-logs\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.642057 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0283b9a-4d82-4a68-9b49-31fde9eda7b6","Type":"ContainerStarted","Data":"6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1"}
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.642383 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0283b9a-4d82-4a68-9b49-31fde9eda7b6","Type":"ContainerStarted","Data":"d623bdc4526c89600a360cad5d0e674ff5f3df913a6bd16c927d7b8b946c7352"}
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.650137 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ac77a77-f11e-4956-9a0d-5189412ccee4","Type":"ContainerDied","Data":"7183cab6eff37436b227b05f73d2f2a6d7289f53274c4937343f266f8b8ec3b0"}
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.650118 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.650254 4675 scope.go:117] "RemoveContainer" containerID="579d9ed690ea19da6834ee9cb48d83c16009a3aaac4800a01ed932ada41c35da"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.669005 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.6689813190000002 podStartE2EDuration="2.668981319s" podCreationTimestamp="2025-11-25 12:49:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:30.655017098 +0000 UTC m=+1315.826609439" watchObservedRunningTime="2025-11-25 12:49:30.668981319 +0000 UTC m=+1315.840573670"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.684609 4675 scope.go:117] "RemoveContainer" containerID="3cc678ea005806705954ecf5b2d5305193a6ff1789bbab93ff8e6ece55197c02"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.693609 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.703789 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.722780 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 12:49:30 crc kubenswrapper[4675]: E1125 12:49:30.723248 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-log"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.723272 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-log"
Nov 25 12:49:30 crc kubenswrapper[4675]: E1125 12:49:30.723293 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-api"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.723299 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-api"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.723485 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-api"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.723511 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" containerName="nova-api-log"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.724449 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.730377 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.759901 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.809086 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rcsr\" (UniqueName: \"kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.809149 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.809189 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.809293 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.910336 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.910432 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rcsr\" (UniqueName: \"kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.910473 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.910518 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.910995 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.924864 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.926538 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:30 crc kubenswrapper[4675]: I1125 12:49:30.930720 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rcsr\" (UniqueName: \"kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr\") pod \"nova-api-0\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " pod="openstack/nova-api-0"
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.043478 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.254652 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6df5497f4d-4g9tv" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused"
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.254778 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6df5497f4d-4g9tv"
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.501973 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 12:49:31 crc kubenswrapper[4675]: W1125 12:49:31.515166 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2ceb0ec_05e6_4ff3_aeaa_1c54c0e0846d.slice/crio-92d2f4f969e8c3f4e4973bff294439f5e343fd797609882b2a1e35a05d9c24e2 WatchSource:0}: Error finding container 92d2f4f969e8c3f4e4973bff294439f5e343fd797609882b2a1e35a05d9c24e2: Status 404 returned error can't find the container with id 92d2f4f969e8c3f4e4973bff294439f5e343fd797609882b2a1e35a05d9c24e2
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.548375 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ac77a77-f11e-4956-9a0d-5189412ccee4" path="/var/lib/kubelet/pods/2ac77a77-f11e-4956-9a0d-5189412ccee4/volumes"
Nov 25 12:49:31 crc kubenswrapper[4675]: I1125 12:49:31.660841 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerStarted","Data":"92d2f4f969e8c3f4e4973bff294439f5e343fd797609882b2a1e35a05d9c24e2"}
Nov 25 12:49:32 crc kubenswrapper[4675]: I1125 12:49:32.674787 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerStarted","Data":"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84"}
Nov 25 12:49:32 crc kubenswrapper[4675]: I1125 12:49:32.675174 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerStarted","Data":"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee"}
Nov 25 12:49:32 crc kubenswrapper[4675]: I1125 12:49:32.699323 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.699293386 podStartE2EDuration="2.699293386s" podCreationTimestamp="2025-11-25 12:49:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:32.693315428 +0000 UTC m=+1317.864907779" watchObservedRunningTime="2025-11-25 12:49:32.699293386 +0000 UTC m=+1317.870885747"
Nov 25 12:49:34 crc kubenswrapper[4675]: I1125 12:49:34.076017 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.007257 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.405297 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6df5497f4d-4g9tv"
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532501 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532629 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532695 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532741 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532775 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532803 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.532977 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6sbwp\" (UniqueName: \"kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp\") pod \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\" (UID: \"e3f6e6c2-9319-48ac-aeeb-38fd305a073d\") "
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.533172 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs" (OuterVolumeSpecName: "logs") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.535040 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-logs\") on node \"crc\" DevicePath \"\""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.551445 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.554092 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp" (OuterVolumeSpecName: "kube-api-access-6sbwp") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "kube-api-access-6sbwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.605981 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.625906 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data" (OuterVolumeSpecName: "config-data") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.635303 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts" (OuterVolumeSpecName: "scripts") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.636639 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6sbwp\" (UniqueName: \"kubernetes.io/projected/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-kube-api-access-6sbwp\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.636667 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.636677 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.636686 4675 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.636696 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.673345 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "e3f6e6c2-9319-48ac-aeeb-38fd305a073d" (UID: "e3f6e6c2-9319-48ac-aeeb-38fd305a073d"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.738248 4675 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f6e6c2-9319-48ac-aeeb-38fd305a073d-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.749173 4675 generic.go:334] "Generic (PLEG): container finished" podID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerID="70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6" exitCode=137 Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.749227 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerDied","Data":"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6"} Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.749261 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df5497f4d-4g9tv" event={"ID":"e3f6e6c2-9319-48ac-aeeb-38fd305a073d","Type":"ContainerDied","Data":"764f18847fe6852a3dbce69e26be8f51a0ecefc23fd9dfba73ff8543c2406f20"} Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.749281 4675 scope.go:117] "RemoveContainer" containerID="83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.749513 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6df5497f4d-4g9tv" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.803762 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.811391 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6df5497f4d-4g9tv"] Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.935146 4675 scope.go:117] "RemoveContainer" containerID="70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.956874 4675 scope.go:117] "RemoveContainer" containerID="83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09" Nov 25 12:49:37 crc kubenswrapper[4675]: E1125 12:49:37.957325 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09\": container with ID starting with 83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09 not found: ID does not exist" containerID="83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.957383 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09"} err="failed to get container status \"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09\": rpc error: code = NotFound desc = could not find container \"83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09\": container with ID starting with 83ee38697cce9092f8117e2615eafe001474f474ea8c93ab4b857e311b620b09 not found: ID does not exist" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.957416 4675 scope.go:117] "RemoveContainer" containerID="70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6" Nov 25 12:49:37 crc kubenswrapper[4675]: E1125 12:49:37.957853 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6\": container with ID starting with 70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6 not found: ID does not exist" containerID="70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6" Nov 25 12:49:37 crc kubenswrapper[4675]: I1125 12:49:37.957903 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6"} err="failed to get container status \"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6\": rpc error: code = NotFound desc = could not find container \"70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6\": container with ID starting with 70380bf13f2050477d821c075069b0596c7a078d9ed5b4b5b3639740f43ddbb6 not found: ID does not exist" Nov 25 12:49:39 crc kubenswrapper[4675]: I1125 12:49:39.076092 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 12:49:39 crc kubenswrapper[4675]: I1125 12:49:39.105899 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 12:49:39 crc kubenswrapper[4675]: I1125 12:49:39.543448 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" path="/var/lib/kubelet/pods/e3f6e6c2-9319-48ac-aeeb-38fd305a073d/volumes" Nov 25 12:49:39 crc kubenswrapper[4675]: I1125 12:49:39.809680 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 12:49:41 crc kubenswrapper[4675]: I1125 12:49:41.044107 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 12:49:41 crc kubenswrapper[4675]: I1125 12:49:41.044222 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 12:49:42 crc kubenswrapper[4675]: I1125 12:49:42.127413 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.197:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:49:42 crc kubenswrapper[4675]: I1125 12:49:42.127631 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.197:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:49:43 crc kubenswrapper[4675]: I1125 12:49:43.662154 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:49:43 crc kubenswrapper[4675]: I1125 12:49:43.662218 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:49:45 crc kubenswrapper[4675]: E1125 12:49:45.546001 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfeec8b61_9d90_4dd5_9261_e4f88ddaadfc.slice/crio-conmon-6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4.scope\": RecentStats: unable to find data in memory cache]" Nov 25 12:49:45 crc kubenswrapper[4675]: I1125 12:49:45.861426 4675 generic.go:334] "Generic (PLEG): container finished" podID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" containerID="0580d1a2296c7169ae63e2bd56dd2f1f15e2619963e6bfc19ca4209e88b9ffb7" exitCode=137 Nov 25 12:49:45 crc kubenswrapper[4675]: I1125 12:49:45.861503 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e3d1f9da-2700-492f-9ad2-19f0053f4b0a","Type":"ContainerDied","Data":"0580d1a2296c7169ae63e2bd56dd2f1f15e2619963e6bfc19ca4209e88b9ffb7"} Nov 25 12:49:45 crc kubenswrapper[4675]: I1125 12:49:45.866251 4675 generic.go:334] "Generic (PLEG): container finished" podID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerID="6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4" exitCode=137 Nov 25 12:49:45 crc kubenswrapper[4675]: I1125 12:49:45.866303 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerDied","Data":"6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4"} Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.041659 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.215751 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.221887 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280484 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fcss\" (UniqueName: \"kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss\") pod \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280537 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data\") pod \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280575 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle\") pod \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280608 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data\") pod \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280629 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle\") pod \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\" (UID: \"e3d1f9da-2700-492f-9ad2-19f0053f4b0a\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280708 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4svk\" (UniqueName: \"kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk\") pod \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.280742 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs\") pod \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\" (UID: \"feec8b61-9d90-4dd5-9261-e4f88ddaadfc\") " Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.281336 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs" (OuterVolumeSpecName: "logs") pod "feec8b61-9d90-4dd5-9261-e4f88ddaadfc" (UID: "feec8b61-9d90-4dd5-9261-e4f88ddaadfc"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.287260 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss" (OuterVolumeSpecName: "kube-api-access-7fcss") pod "e3d1f9da-2700-492f-9ad2-19f0053f4b0a" (UID: "e3d1f9da-2700-492f-9ad2-19f0053f4b0a"). InnerVolumeSpecName "kube-api-access-7fcss". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.296770 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk" (OuterVolumeSpecName: "kube-api-access-t4svk") pod "feec8b61-9d90-4dd5-9261-e4f88ddaadfc" (UID: "feec8b61-9d90-4dd5-9261-e4f88ddaadfc"). InnerVolumeSpecName "kube-api-access-t4svk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.317040 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3d1f9da-2700-492f-9ad2-19f0053f4b0a" (UID: "e3d1f9da-2700-492f-9ad2-19f0053f4b0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.318739 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data" (OuterVolumeSpecName: "config-data") pod "e3d1f9da-2700-492f-9ad2-19f0053f4b0a" (UID: "e3d1f9da-2700-492f-9ad2-19f0053f4b0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.329151 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "feec8b61-9d90-4dd5-9261-e4f88ddaadfc" (UID: "feec8b61-9d90-4dd5-9261-e4f88ddaadfc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.333904 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data" (OuterVolumeSpecName: "config-data") pod "feec8b61-9d90-4dd5-9261-e4f88ddaadfc" (UID: "feec8b61-9d90-4dd5-9261-e4f88ddaadfc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382485 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382509 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382518 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382527 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4svk\" (UniqueName: \"kubernetes.io/projected/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-kube-api-access-t4svk\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382537 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/feec8b61-9d90-4dd5-9261-e4f88ddaadfc-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382547 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fcss\" (UniqueName: \"kubernetes.io/projected/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-kube-api-access-7fcss\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.382557 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3d1f9da-2700-492f-9ad2-19f0053f4b0a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.877727 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.877722 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"feec8b61-9d90-4dd5-9261-e4f88ddaadfc","Type":"ContainerDied","Data":"f93ea3bf760231a4281592775171bfed8a5104f7add5abd2b637753d8094fbd4"} Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.879086 4675 scope.go:117] "RemoveContainer" containerID="6a00ca14fe314ed4d9c6e6b464cd9f3985241d85a86ad1de510765b2159568e4" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.879180 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e3d1f9da-2700-492f-9ad2-19f0053f4b0a","Type":"ContainerDied","Data":"2a2117e92bce6c902441f96bf12751a96a15c5f109985e59bf074850402a75b9"} Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.879520 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.905135 4675 scope.go:117] "RemoveContainer" containerID="773690c613c302bf324bbabd5e6ddc3589bc4c651edbd482aba15d21bb635bf6" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.922011 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.941286 4675 scope.go:117] "RemoveContainer" containerID="0580d1a2296c7169ae63e2bd56dd2f1f15e2619963e6bfc19ca4209e88b9ffb7" Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.957667 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.972269 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:46 crc kubenswrapper[4675]: I1125 12:49:46.995934 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.021243 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.028840 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.028901 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.028929 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.028938 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.028956 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-metadata" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.028964 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-metadata" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.028998 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029005 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.029024 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-log" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029032 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-log" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.029061 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon-log" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029072 4675 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon-log" Nov 25 12:49:47 crc kubenswrapper[4675]: E1125 12:49:47.029092 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029099 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029549 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029578 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029596 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon-log" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029616 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-log" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029675 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" containerName="nova-metadata-metadata" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029703 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.029720 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f6e6c2-9319-48ac-aeeb-38fd305a073d" containerName="horizon" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.033963 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.038719 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.040656 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.040796 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.040976 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.062866 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.065777 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.068859 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.069051 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.081260 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200065 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200103 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200124 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200150 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200185 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200329 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqjzw\" (UniqueName: \"kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200407 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqh8l\" (UniqueName: \"kubernetes.io/projected/10485665-29b9-4a6f-ac17-3cca271b761d-kube-api-access-fqh8l\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200439 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200679 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.200716 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.301868 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.301949 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.301970 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302032 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302069 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302090 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302114 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302147 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302212 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqjzw\" (UniqueName: \"kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.302235 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqh8l\" (UniqueName: \"kubernetes.io/projected/10485665-29b9-4a6f-ac17-3cca271b761d-kube-api-access-fqh8l\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.305499 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.311799 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.312589 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.312658 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.313116 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/10485665-29b9-4a6f-ac17-3cca271b761d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.319043 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.319967 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.321141 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.321566 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqh8l\" (UniqueName: \"kubernetes.io/projected/10485665-29b9-4a6f-ac17-3cca271b761d-kube-api-access-fqh8l\") pod \"nova-cell1-novncproxy-0\" (UID: \"10485665-29b9-4a6f-ac17-3cca271b761d\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.331863 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqjzw\" (UniqueName: \"kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw\") pod \"nova-metadata-0\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.359694 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.394689 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.549251 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3d1f9da-2700-492f-9ad2-19f0053f4b0a" path="/var/lib/kubelet/pods/e3d1f9da-2700-492f-9ad2-19f0053f4b0a/volumes" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.550391 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="feec8b61-9d90-4dd5-9261-e4f88ddaadfc" path="/var/lib/kubelet/pods/feec8b61-9d90-4dd5-9261-e4f88ddaadfc/volumes" Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.873038 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.893936 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10485665-29b9-4a6f-ac17-3cca271b761d","Type":"ContainerStarted","Data":"3664f1edc21200d41c8fd9356686b3d52938a7b5607afa1eda6a3bbcfddfe7cb"} Nov 25 12:49:47 crc kubenswrapper[4675]: I1125 12:49:47.952282 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.904901 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerStarted","Data":"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543"} Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.905368 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerStarted","Data":"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a"} Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.905379 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerStarted","Data":"01dfbbde2137bc367035098a1dc3b7a919c3f6f13207416d2f2cf2bf0fb07e04"} Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.906952 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10485665-29b9-4a6f-ac17-3cca271b761d","Type":"ContainerStarted","Data":"e82e5bad8b571f2b3ccd983b6eb3ad9c9689d5f153b538d108f36269b5ce36b2"} Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.929722 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.929702496 podStartE2EDuration="2.929702496s" podCreationTimestamp="2025-11-25 12:49:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:48.923311854 +0000 UTC m=+1334.094904205" watchObservedRunningTime="2025-11-25 12:49:48.929702496 +0000 UTC m=+1334.101294847" Nov 25 12:49:48 crc kubenswrapper[4675]: I1125 12:49:48.950023 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.950006907 podStartE2EDuration="2.950006907s" podCreationTimestamp="2025-11-25 12:49:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:48.944211434 +0000 UTC m=+1334.115803785" watchObservedRunningTime="2025-11-25 12:49:48.950006907 +0000 UTC m=+1334.121599258" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.047773 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.048025 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.048512 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.051884 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.953347 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 12:49:51 crc kubenswrapper[4675]: I1125 12:49:51.957465 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.177010 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"] Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.189675 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.192258 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.192544 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z24mw\" (UniqueName: \"kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.192722 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.192898 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.193007 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.193155 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.194326 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"] Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.293855 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.293970 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.294001 4675 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.294075 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.294103 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.294130 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z24mw\" (UniqueName: \"kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.295247 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.295376 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.295899 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.296002 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.299377 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.316056 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z24mw\" (UniqueName: 
\"kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw\") pod \"dnsmasq-dns-89c5cd4d5-jzgb8\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") " pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.361208 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.395209 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.395264 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 12:49:52 crc kubenswrapper[4675]: I1125 12:49:52.539049 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:53 crc kubenswrapper[4675]: I1125 12:49:53.030420 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"] Nov 25 12:49:53 crc kubenswrapper[4675]: I1125 12:49:53.990941 4675 generic.go:334] "Generic (PLEG): container finished" podID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerID="e64ec1af788e77cf7a5a1c47d824d508fc2fdc8cb8d7a66046997ee786ec5336" exitCode=0 Nov 25 12:49:53 crc kubenswrapper[4675]: I1125 12:49:53.992433 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" event={"ID":"1c8edeb5-7208-4cd6-b861-03997e90e85c","Type":"ContainerDied","Data":"e64ec1af788e77cf7a5a1c47d824d508fc2fdc8cb8d7a66046997ee786ec5336"} Nov 25 12:49:53 crc kubenswrapper[4675]: I1125 12:49:53.992467 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" event={"ID":"1c8edeb5-7208-4cd6-b861-03997e90e85c","Type":"ContainerStarted","Data":"f235d92f402b70049be41649c22b1257e74271af0f6a1fc226cc625921dfcbf6"} Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.470323 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.470863 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-central-agent" containerID="cri-o://93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf" gracePeriod=30 Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.471291 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="proxy-httpd" containerID="cri-o://49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb" gracePeriod=30 Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.471350 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="sg-core" containerID="cri-o://3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610" gracePeriod=30 Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.471383 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-notification-agent" containerID="cri-o://c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819" gracePeriod=30 Nov 25 12:49:54 crc kubenswrapper[4675]: I1125 12:49:54.754869 
4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.002470 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" event={"ID":"1c8edeb5-7208-4cd6-b861-03997e90e85c","Type":"ContainerStarted","Data":"29cabc1a090e5abe9ba14cddf3e032467818d361d9c295db684261aadc195918"} Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.002621 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005346 4675 generic.go:334] "Generic (PLEG): container finished" podID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerID="49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb" exitCode=0 Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005380 4675 generic.go:334] "Generic (PLEG): container finished" podID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerID="3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610" exitCode=2 Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005391 4675 generic.go:334] "Generic (PLEG): container finished" podID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerID="93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf" exitCode=0 Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005420 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerDied","Data":"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb"} Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005463 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerDied","Data":"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610"} Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005477 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerDied","Data":"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf"} Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005562 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-log" containerID="cri-o://27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee" gracePeriod=30 Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.005595 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-api" containerID="cri-o://00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84" gracePeriod=30 Nov 25 12:49:55 crc kubenswrapper[4675]: I1125 12:49:55.038288 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" podStartSLOduration=3.038267296 podStartE2EDuration="3.038267296s" podCreationTimestamp="2025-11-25 12:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:49:55.029212691 +0000 UTC m=+1340.200805032" watchObservedRunningTime="2025-11-25 12:49:55.038267296 +0000 UTC m=+1340.209859637" Nov 25 12:49:56 crc kubenswrapper[4675]: I1125 12:49:56.014204 4675 
generic.go:334] "Generic (PLEG): container finished" podID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerID="27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee" exitCode=143 Nov 25 12:49:56 crc kubenswrapper[4675]: I1125 12:49:56.015283 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerDied","Data":"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee"} Nov 25 12:49:57 crc kubenswrapper[4675]: I1125 12:49:57.360678 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:57 crc kubenswrapper[4675]: I1125 12:49:57.393592 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:57 crc kubenswrapper[4675]: I1125 12:49:57.395693 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 12:49:57 crc kubenswrapper[4675]: I1125 12:49:57.395738 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.049019 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.302861 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-t8hm9"] Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.304326 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.309081 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.309405 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.326108 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.326158 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh4mp\" (UniqueName: \"kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.326187 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.326233 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts\") pod 
\"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.340460 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-t8hm9"] Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.411661 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.199:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.411682 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.199:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.427902 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.427978 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh4mp\" (UniqueName: \"kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.428011 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.428071 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.450090 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.469440 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.474436 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.474547 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh4mp\" (UniqueName: \"kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp\") pod \"nova-cell1-cell-mapping-t8hm9\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.702540 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.853348 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.893539 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961169 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961275 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961349 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5nbk\" (UniqueName: \"kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961380 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961430 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961499 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961555 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle\") pod 
\"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.961588 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts\") pod \"173ef998-03f7-4bfa-93c2-b2f00b965861\" (UID: \"173ef998-03f7-4bfa-93c2-b2f00b965861\") " Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.962195 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.962597 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.968762 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk" (OuterVolumeSpecName: "kube-api-access-b5nbk") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "kube-api-access-b5nbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:58 crc kubenswrapper[4675]: I1125 12:49:58.984872 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts" (OuterVolumeSpecName: "scripts") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065007 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rcsr\" (UniqueName: \"kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr\") pod \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065325 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle\") pod \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065442 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs\") pod \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065461 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data\") pod \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\" (UID: \"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d\") " Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065791 4675 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065803 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065825 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5nbk\" (UniqueName: \"kubernetes.io/projected/173ef998-03f7-4bfa-93c2-b2f00b965861-kube-api-access-b5nbk\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.065837 4675 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/173ef998-03f7-4bfa-93c2-b2f00b965861-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.073959 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs" (OuterVolumeSpecName: "logs") pod "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" (UID: "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.109251 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr" (OuterVolumeSpecName: "kube-api-access-4rcsr") pod "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" (UID: "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d"). InnerVolumeSpecName "kube-api-access-4rcsr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.112682 4675 generic.go:334] "Generic (PLEG): container finished" podID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerID="c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819" exitCode=0 Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.112756 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerDied","Data":"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819"} Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.112781 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"173ef998-03f7-4bfa-93c2-b2f00b965861","Type":"ContainerDied","Data":"5676abeb6f5471522beb6de720b7627b2ca7d26b11643406f287345cb258e0a1"} Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.112798 4675 scope.go:117] "RemoveContainer" containerID="49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.112937 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.121669 4675 generic.go:334] "Generic (PLEG): container finished" podID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerID="00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84" exitCode=0 Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.121924 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.122427 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerDied","Data":"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84"} Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.125515 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d","Type":"ContainerDied","Data":"92d2f4f969e8c3f4e4973bff294439f5e343fd797609882b2a1e35a05d9c24e2"} Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.142373 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.159876 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" (UID: "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.164539 4675 scope.go:117] "RemoveContainer" containerID="3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.166963 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data" (OuterVolumeSpecName: "config-data") pod "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" (UID: "c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.168288 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rcsr\" (UniqueName: \"kubernetes.io/projected/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-kube-api-access-4rcsr\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.168310 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.168320 4675 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.168329 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.168340 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.173095 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.202445 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.211368 4675 scope.go:117] "RemoveContainer" containerID="c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.218108 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data" (OuterVolumeSpecName: "config-data") pod "173ef998-03f7-4bfa-93c2-b2f00b965861" (UID: "173ef998-03f7-4bfa-93c2-b2f00b965861"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.270176 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.270202 4675 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.270211 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/173ef998-03f7-4bfa-93c2-b2f00b965861-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.313660 4675 scope.go:117] "RemoveContainer" containerID="93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.335679 4675 scope.go:117] "RemoveContainer" containerID="49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.336081 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb\": container with ID starting with 49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb not found: ID does not exist" containerID="49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.336110 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb"} err="failed to get container status \"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb\": rpc error: code = NotFound desc = could not find container \"49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb\": container with ID starting with 49efedf7ba716c91c9e665d8c660775d20afb801c0180c4106cbcb0669043ffb not found: ID does not exist" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.336135 4675 scope.go:117] "RemoveContainer" containerID="3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.336406 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610\": container with ID starting with 3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610 not found: ID does not exist" containerID="3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.336448 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610"} err="failed to get container status \"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610\": rpc error: code = NotFound desc = could not find container \"3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610\": container with ID starting with 3a6f76018a208101ab05ec1b8fffca8df40fa67b104bdc1aa50727853b145610 not found: ID does not exist" Nov 25 12:49:59 crc 
kubenswrapper[4675]: I1125 12:49:59.336483 4675 scope.go:117] "RemoveContainer" containerID="c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.336731 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819\": container with ID starting with c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819 not found: ID does not exist" containerID="c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.336750 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819"} err="failed to get container status \"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819\": rpc error: code = NotFound desc = could not find container \"c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819\": container with ID starting with c67f4162b33ca60651478c46fe374560b1215626a1a79416d6e963f877127819 not found: ID does not exist" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.336764 4675 scope.go:117] "RemoveContainer" containerID="93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.336975 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf\": container with ID starting with 93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf not found: ID does not exist" containerID="93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.337008 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf"} err="failed to get container status \"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf\": rpc error: code = NotFound desc = could not find container \"93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf\": container with ID starting with 93708533119fc0a90c7526953ff1eff91eb8d4bec8ce6abb17b0a574fbc75acf not found: ID does not exist" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.337030 4675 scope.go:117] "RemoveContainer" containerID="00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.378085 4675 scope.go:117] "RemoveContainer" containerID="27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.407513 4675 scope.go:117] "RemoveContainer" containerID="00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.408038 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84\": container with ID starting with 00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84 not found: ID does not exist" containerID="00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.408083 4675 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84"} err="failed to get container status \"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84\": rpc error: code = NotFound desc = could not find container \"00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84\": container with ID starting with 00d2c02aea046c716a8acea4051114a64b825446c433c22995d7f07d65bafe84 not found: ID does not exist" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.408117 4675 scope.go:117] "RemoveContainer" containerID="27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.409853 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee\": container with ID starting with 27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee not found: ID does not exist" containerID="27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.409890 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee"} err="failed to get container status \"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee\": rpc error: code = NotFound desc = could not find container \"27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee\": container with ID starting with 27a82829d36606f1676b3d758dea4396c98128b8effd05f637ae539f14a043ee not found: ID does not exist" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.456005 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.505582 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.586579 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" path="/var/lib/kubelet/pods/173ef998-03f7-4bfa-93c2-b2f00b965861/volumes" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.587902 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.589952 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-notification-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.589979 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-notification-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.589989 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="sg-core" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.590000 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="sg-core" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.590023 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-log" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.590031 4675 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-log" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.590068 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="proxy-httpd" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.590076 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="proxy-httpd" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.590099 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-central-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.590106 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-central-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: E1125 12:49:59.590130 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-api" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.590137 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-api" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591445 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="sg-core" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591488 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-log" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591505 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="proxy-httpd" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591529 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" containerName="nova-api-api" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591537 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-notification-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.591548 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="173ef998-03f7-4bfa-93c2-b2f00b965861" containerName="ceilometer-central-agent" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.600441 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.600675 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.611135 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.616577 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.616903 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.617042 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.628462 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.644781 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.646962 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.649643 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.649838 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.650396 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.660101 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786197 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-config-data\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786593 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-run-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786639 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786673 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786846 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs\") 
pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786893 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-log-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786952 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.786993 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787026 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjmfw\" (UniqueName: \"kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787092 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787229 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brqf7\" (UniqueName: \"kubernetes.io/projected/a603c80e-1d69-41a1-99fb-dfde13a182d6-kube-api-access-brqf7\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787299 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787328 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.787361 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-scripts\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.888890 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-run-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.888952 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.888978 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889013 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889032 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-log-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889060 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889083 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889104 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjmfw\" (UniqueName: \"kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889123 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889163 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brqf7\" (UniqueName: \"kubernetes.io/projected/a603c80e-1d69-41a1-99fb-dfde13a182d6-kube-api-access-brqf7\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 
12:49:59.889195 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889213 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889233 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-scripts\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.889263 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-config-data\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.890780 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.892372 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-log-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.892829 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a603c80e-1d69-41a1-99fb-dfde13a182d6-run-httpd\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.908453 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.909121 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-config-data\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.909303 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.909373 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjmfw\" (UniqueName: 
\"kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.909452 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.909696 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.910393 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.911005 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-scripts\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.913044 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " pod="openstack/nova-api-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.913504 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brqf7\" (UniqueName: \"kubernetes.io/projected/a603c80e-1d69-41a1-99fb-dfde13a182d6-kube-api-access-brqf7\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.917158 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a603c80e-1d69-41a1-99fb-dfde13a182d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a603c80e-1d69-41a1-99fb-dfde13a182d6\") " pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.923939 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 12:49:59 crc kubenswrapper[4675]: I1125 12:49:59.980232 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:50:00 crc kubenswrapper[4675]: I1125 12:50:00.052896 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-t8hm9"] Nov 25 12:50:00 crc kubenswrapper[4675]: I1125 12:50:00.154862 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-t8hm9" event={"ID":"857651f8-5af8-4ed3-95b4-20e2f9417d29","Type":"ContainerStarted","Data":"7505f2963a08f229395bcf4b732f1ce9933fb49d59659eb82d15c8e8fddeeb84"} Nov 25 12:50:00 crc kubenswrapper[4675]: I1125 12:50:00.515600 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 12:50:00 crc kubenswrapper[4675]: W1125 12:50:00.612546 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod693ad55c_37af_4c85_8326_cf39d4613fdf.slice/crio-557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8 WatchSource:0}: Error finding container 557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8: Status 404 returned error can't find the container with id 557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8 Nov 25 12:50:00 crc kubenswrapper[4675]: I1125 12:50:00.613414 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.177986 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerStarted","Data":"f631c7b18a8798e0707d7b486d9edfa3dfa03ecc8cabfa8b440bd0877518ac1f"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.178224 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerStarted","Data":"300473e1a8157ec44f3b32ab48854dc2894458c85303c1418f17bfaee60d6bdd"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.178238 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerStarted","Data":"557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.180186 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-t8hm9" event={"ID":"857651f8-5af8-4ed3-95b4-20e2f9417d29","Type":"ContainerStarted","Data":"bac85683d946adfc0e86942464942fd73ad3f74ccdfa42d8c60da5e55cea7822"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.181766 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a603c80e-1d69-41a1-99fb-dfde13a182d6","Type":"ContainerStarted","Data":"b67fefc01bda6e26149518f231dd72896825bc2143c59aedb3c04567bea6a7a1"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.181796 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a603c80e-1d69-41a1-99fb-dfde13a182d6","Type":"ContainerStarted","Data":"f2a9e22dbbc93fb588843ee84c413e900b22ecdc3e10ecb210274aa4284ee344"} Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.253071 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-t8hm9" podStartSLOduration=3.253047206 podStartE2EDuration="3.253047206s" podCreationTimestamp="2025-11-25 12:49:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:50:01.2525369 +0000 UTC m=+1346.424129251" watchObservedRunningTime="2025-11-25 12:50:01.253047206 +0000 UTC m=+1346.424639577" Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.253295 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.253287224 podStartE2EDuration="2.253287224s" podCreationTimestamp="2025-11-25 12:49:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:50:01.226725315 +0000 UTC m=+1346.398317656" watchObservedRunningTime="2025-11-25 12:50:01.253287224 +0000 UTC m=+1346.424879565" Nov 25 12:50:01 crc kubenswrapper[4675]: I1125 12:50:01.543563 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d" path="/var/lib/kubelet/pods/c2ceb0ec-05e6-4ff3-aeaa-1c54c0e0846d/volumes" Nov 25 12:50:02 crc kubenswrapper[4675]: I1125 12:50:02.205501 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a603c80e-1d69-41a1-99fb-dfde13a182d6","Type":"ContainerStarted","Data":"de6c2b104017d0954af3a1fd67472ce0d39da5abf9495597c13cd4a7a8ae1869"} Nov 25 12:50:02 crc kubenswrapper[4675]: I1125 12:50:02.540900 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" Nov 25 12:50:02 crc kubenswrapper[4675]: I1125 12:50:02.614180 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:50:02 crc kubenswrapper[4675]: I1125 12:50:02.614404 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="dnsmasq-dns" containerID="cri-o://0c4a5408da975bda9a1274fe30bb8adffcc858722b1da0f099ef3e83b4754176" gracePeriod=10 Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.238639 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a603c80e-1d69-41a1-99fb-dfde13a182d6","Type":"ContainerStarted","Data":"f8c8a8e867ae495849d5ba729fc71aabe1f777dfe9aaf91b6eaa813c5f77cd5c"} Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.242176 4675 generic.go:334] "Generic (PLEG): container finished" podID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerID="0c4a5408da975bda9a1274fe30bb8adffcc858722b1da0f099ef3e83b4754176" exitCode=0 Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.242206 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" event={"ID":"8f773f1d-b032-4e64-ba15-1ec5cac38caa","Type":"ContainerDied","Data":"0c4a5408da975bda9a1274fe30bb8adffcc858722b1da0f099ef3e83b4754176"} Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.307247 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378008 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378090 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378150 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378233 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378308 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlrlc\" (UniqueName: \"kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.378366 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb\") pod \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\" (UID: \"8f773f1d-b032-4e64-ba15-1ec5cac38caa\") " Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.385930 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc" (OuterVolumeSpecName: "kube-api-access-nlrlc") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "kube-api-access-nlrlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.460702 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.463250 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config" (OuterVolumeSpecName: "config") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.477853 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.478014 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.482940 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.482973 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.483005 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-config\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.483019 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.483034 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlrlc\" (UniqueName: \"kubernetes.io/projected/8f773f1d-b032-4e64-ba15-1ec5cac38caa-kube-api-access-nlrlc\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.485772 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8f773f1d-b032-4e64-ba15-1ec5cac38caa" (UID: "8f773f1d-b032-4e64-ba15-1ec5cac38caa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:03 crc kubenswrapper[4675]: I1125 12:50:03.585139 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8f773f1d-b032-4e64-ba15-1ec5cac38caa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.252740 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" event={"ID":"8f773f1d-b032-4e64-ba15-1ec5cac38caa","Type":"ContainerDied","Data":"cf587ef8b11f4d40e71e896ac41a72d68079eee27c59ac3897b2c719cb38abc5"} Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.252855 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.253099 4675 scope.go:117] "RemoveContainer" containerID="0c4a5408da975bda9a1274fe30bb8adffcc858722b1da0f099ef3e83b4754176" Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.276549 4675 scope.go:117] "RemoveContainer" containerID="c9827326323f7a253eedffc9cf8b0bae0ab55665abebcd901aaa3c1b6a386eb6" Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.279567 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:50:04 crc kubenswrapper[4675]: I1125 12:50:04.297867 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-x8wzk"] Nov 25 12:50:05 crc kubenswrapper[4675]: I1125 12:50:05.263552 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a603c80e-1d69-41a1-99fb-dfde13a182d6","Type":"ContainerStarted","Data":"6108b0dc7a0734cdc718350c4b1f2f3560b2830be03d964ddbd7e1d0febedc4e"} Nov 25 12:50:05 crc kubenswrapper[4675]: I1125 12:50:05.263922 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 12:50:05 crc kubenswrapper[4675]: I1125 12:50:05.291340 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.191118733 podStartE2EDuration="6.291318647s" podCreationTimestamp="2025-11-25 12:49:59 +0000 UTC" firstStartedPulling="2025-11-25 12:50:00.527262544 +0000 UTC m=+1345.698854885" lastFinishedPulling="2025-11-25 12:50:04.627462458 +0000 UTC m=+1349.799054799" observedRunningTime="2025-11-25 12:50:05.282287272 +0000 UTC m=+1350.453879643" watchObservedRunningTime="2025-11-25 12:50:05.291318647 +0000 UTC m=+1350.462910988" Nov 25 12:50:05 crc kubenswrapper[4675]: I1125 12:50:05.552342 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" path="/var/lib/kubelet/pods/8f773f1d-b032-4e64-ba15-1ec5cac38caa/volumes" Nov 25 12:50:06 crc kubenswrapper[4675]: I1125 12:50:06.277712 4675 generic.go:334] "Generic (PLEG): container finished" podID="857651f8-5af8-4ed3-95b4-20e2f9417d29" containerID="bac85683d946adfc0e86942464942fd73ad3f74ccdfa42d8c60da5e55cea7822" exitCode=0 Nov 25 12:50:06 crc kubenswrapper[4675]: I1125 12:50:06.278175 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-t8hm9" event={"ID":"857651f8-5af8-4ed3-95b4-20e2f9417d29","Type":"ContainerDied","Data":"bac85683d946adfc0e86942464942fd73ad3f74ccdfa42d8c60da5e55cea7822"} Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.404035 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.431549 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.433730 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.680637 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.774180 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh4mp\" (UniqueName: \"kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp\") pod \"857651f8-5af8-4ed3-95b4-20e2f9417d29\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.774258 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle\") pod \"857651f8-5af8-4ed3-95b4-20e2f9417d29\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.774390 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts\") pod \"857651f8-5af8-4ed3-95b4-20e2f9417d29\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.774460 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data\") pod \"857651f8-5af8-4ed3-95b4-20e2f9417d29\" (UID: \"857651f8-5af8-4ed3-95b4-20e2f9417d29\") " Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.780841 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp" (OuterVolumeSpecName: "kube-api-access-hh4mp") pod "857651f8-5af8-4ed3-95b4-20e2f9417d29" (UID: "857651f8-5af8-4ed3-95b4-20e2f9417d29"). InnerVolumeSpecName "kube-api-access-hh4mp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.781172 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts" (OuterVolumeSpecName: "scripts") pod "857651f8-5af8-4ed3-95b4-20e2f9417d29" (UID: "857651f8-5af8-4ed3-95b4-20e2f9417d29"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.818970 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "857651f8-5af8-4ed3-95b4-20e2f9417d29" (UID: "857651f8-5af8-4ed3-95b4-20e2f9417d29"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.823298 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data" (OuterVolumeSpecName: "config-data") pod "857651f8-5af8-4ed3-95b4-20e2f9417d29" (UID: "857651f8-5af8-4ed3-95b4-20e2f9417d29"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.876912 4675 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.876954 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.876967 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh4mp\" (UniqueName: \"kubernetes.io/projected/857651f8-5af8-4ed3-95b4-20e2f9417d29-kube-api-access-hh4mp\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:07 crc kubenswrapper[4675]: I1125 12:50:07.876980 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/857651f8-5af8-4ed3-95b4-20e2f9417d29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.170095 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-757b4f8459-x8wzk" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.191:5353: i/o timeout" Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.296081 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-t8hm9" event={"ID":"857651f8-5af8-4ed3-95b4-20e2f9417d29","Type":"ContainerDied","Data":"7505f2963a08f229395bcf4b732f1ce9933fb49d59659eb82d15c8e8fddeeb84"} Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.296171 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7505f2963a08f229395bcf4b732f1ce9933fb49d59659eb82d15c8e8fddeeb84" Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.296169 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-t8hm9" Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.305905 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.489023 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.489254 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-log" containerID="cri-o://300473e1a8157ec44f3b32ab48854dc2894458c85303c1418f17bfaee60d6bdd" gracePeriod=30 Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.489656 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-api" containerID="cri-o://f631c7b18a8798e0707d7b486d9edfa3dfa03ecc8cabfa8b440bd0877518ac1f" gracePeriod=30 Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.508863 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.509468 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerName="nova-scheduler-scheduler" containerID="cri-o://6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" gracePeriod=30 Nov 25 12:50:08 crc kubenswrapper[4675]: I1125 12:50:08.566862 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:50:09 crc kubenswrapper[4675]: E1125 12:50:09.080568 4675 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 12:50:09 crc kubenswrapper[4675]: E1125 12:50:09.093723 4675 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 12:50:09 crc kubenswrapper[4675]: E1125 12:50:09.095760 4675 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 12:50:09 crc kubenswrapper[4675]: E1125 12:50:09.095826 4675 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerName="nova-scheduler-scheduler" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.306376 4675 generic.go:334] "Generic (PLEG): container finished" podID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerID="f631c7b18a8798e0707d7b486d9edfa3dfa03ecc8cabfa8b440bd0877518ac1f" exitCode=0 Nov 25 
12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.306406 4675 generic.go:334] "Generic (PLEG): container finished" podID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerID="300473e1a8157ec44f3b32ab48854dc2894458c85303c1418f17bfaee60d6bdd" exitCode=143 Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.307198 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerDied","Data":"f631c7b18a8798e0707d7b486d9edfa3dfa03ecc8cabfa8b440bd0877518ac1f"} Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.307227 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerDied","Data":"300473e1a8157ec44f3b32ab48854dc2894458c85303c1418f17bfaee60d6bdd"} Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.307238 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"693ad55c-37af-4c85-8326-cf39d4613fdf","Type":"ContainerDied","Data":"557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8"} Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.307247 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="557b54a8a029893adf7449510396f28f8aead869431abab7c829fb7f64c30fd8" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.368040 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507010 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjmfw\" (UniqueName: \"kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw\") pod \"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507051 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle\") pod \"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507161 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs\") pod \"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507206 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs\") pod \"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507263 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs\") pod \"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.507315 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data\") pod 
\"693ad55c-37af-4c85-8326-cf39d4613fdf\" (UID: \"693ad55c-37af-4c85-8326-cf39d4613fdf\") " Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.509313 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs" (OuterVolumeSpecName: "logs") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.519414 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw" (OuterVolumeSpecName: "kube-api-access-tjmfw") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "kube-api-access-tjmfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.573596 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.574117 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data" (OuterVolumeSpecName: "config-data") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.574977 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.582732 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "693ad55c-37af-4c85-8326-cf39d4613fdf" (UID: "693ad55c-37af-4c85-8326-cf39d4613fdf"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609085 4675 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609116 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693ad55c-37af-4c85-8326-cf39d4613fdf-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609128 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609137 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjmfw\" (UniqueName: \"kubernetes.io/projected/693ad55c-37af-4c85-8326-cf39d4613fdf-kube-api-access-tjmfw\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609146 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:09 crc kubenswrapper[4675]: I1125 12:50:09.609155 4675 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693ad55c-37af-4c85-8326-cf39d4613fdf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.325068 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.325506 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" containerID="cri-o://2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a" gracePeriod=30 Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.326270 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" containerID="cri-o://64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543" gracePeriod=30 Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.359987 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.373948 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400154 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:10 crc kubenswrapper[4675]: E1125 12:50:10.400581 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="857651f8-5af8-4ed3-95b4-20e2f9417d29" containerName="nova-manage" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400601 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="857651f8-5af8-4ed3-95b4-20e2f9417d29" containerName="nova-manage" Nov 25 12:50:10 crc kubenswrapper[4675]: E1125 12:50:10.400615 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" 
containerName="nova-api-api" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400622 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-api" Nov 25 12:50:10 crc kubenswrapper[4675]: E1125 12:50:10.400635 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-log" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400643 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-log" Nov 25 12:50:10 crc kubenswrapper[4675]: E1125 12:50:10.400660 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="init" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400667 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="init" Nov 25 12:50:10 crc kubenswrapper[4675]: E1125 12:50:10.400711 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="dnsmasq-dns" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400718 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="dnsmasq-dns" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400931 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f773f1d-b032-4e64-ba15-1ec5cac38caa" containerName="dnsmasq-dns" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400956 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-api" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400968 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="857651f8-5af8-4ed3-95b4-20e2f9417d29" containerName="nova-manage" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.400984 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" containerName="nova-api-log" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.402154 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.406277 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.406349 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.406277 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.488188 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528313 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-config-data\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528388 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf087269-8e7f-416e-9492-b3ccb72f40d0-logs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528429 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528453 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528503 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.528580 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67fqr\" (UniqueName: \"kubernetes.io/projected/cf087269-8e7f-416e-9492-b3ccb72f40d0-kube-api-access-67fqr\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.629959 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67fqr\" (UniqueName: \"kubernetes.io/projected/cf087269-8e7f-416e-9492-b3ccb72f40d0-kube-api-access-67fqr\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631026 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-config-data\") pod 
\"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631121 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf087269-8e7f-416e-9492-b3ccb72f40d0-logs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631179 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631217 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631298 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.631506 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf087269-8e7f-416e-9492-b3ccb72f40d0-logs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.635753 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.635766 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.637423 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-config-data\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.643394 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf087269-8e7f-416e-9492-b3ccb72f40d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.654079 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67fqr\" (UniqueName: \"kubernetes.io/projected/cf087269-8e7f-416e-9492-b3ccb72f40d0-kube-api-access-67fqr\") pod \"nova-api-0\" (UID: \"cf087269-8e7f-416e-9492-b3ccb72f40d0\") " 
pod="openstack/nova-api-0" Nov 25 12:50:10 crc kubenswrapper[4675]: I1125 12:50:10.722564 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 12:50:11 crc kubenswrapper[4675]: I1125 12:50:11.259667 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 12:50:11 crc kubenswrapper[4675]: I1125 12:50:11.339492 4675 generic.go:334] "Generic (PLEG): container finished" podID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerID="2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a" exitCode=143 Nov 25 12:50:11 crc kubenswrapper[4675]: I1125 12:50:11.339559 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerDied","Data":"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a"} Nov 25 12:50:11 crc kubenswrapper[4675]: I1125 12:50:11.340850 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf087269-8e7f-416e-9492-b3ccb72f40d0","Type":"ContainerStarted","Data":"af41bd18e078045e796ba99a98364b4f80bd9db557edf0911b70de74eb0b9f8f"} Nov 25 12:50:11 crc kubenswrapper[4675]: I1125 12:50:11.543860 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="693ad55c-37af-4c85-8326-cf39d4613fdf" path="/var/lib/kubelet/pods/693ad55c-37af-4c85-8326-cf39d4613fdf/volumes" Nov 25 12:50:12 crc kubenswrapper[4675]: I1125 12:50:12.350619 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf087269-8e7f-416e-9492-b3ccb72f40d0","Type":"ContainerStarted","Data":"1b3ac886945a018acfa25aa310399e0417e4943fc01eb7afa22a41bbd58b2214"} Nov 25 12:50:12 crc kubenswrapper[4675]: I1125 12:50:12.350959 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf087269-8e7f-416e-9492-b3ccb72f40d0","Type":"ContainerStarted","Data":"b2a87efa68d69c18c9fc17e07e118088b96fbfe88a23e9fcf4b90fe2ead1e021"} Nov 25 12:50:12 crc kubenswrapper[4675]: I1125 12:50:12.374268 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.374253039 podStartE2EDuration="2.374253039s" podCreationTimestamp="2025-11-25 12:50:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:50:12.368690113 +0000 UTC m=+1357.540282454" watchObservedRunningTime="2025-11-25 12:50:12.374253039 +0000 UTC m=+1357.545845380" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.362136 4675 generic.go:334] "Generic (PLEG): container finished" podID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerID="6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" exitCode=0 Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.363432 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0283b9a-4d82-4a68-9b49-31fde9eda7b6","Type":"ContainerDied","Data":"6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1"} Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.662489 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 
12:50:13.662544 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.662604 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.663360 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.663423 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c" gracePeriod=600 Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.726435 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.762561 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.199:8775/\": read tcp 10.217.0.2:59430->10.217.0.199:8775: read: connection reset by peer" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.762929 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.199:8775/\": read tcp 10.217.0.2:59428->10.217.0.199:8775: read: connection reset by peer" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.902671 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dwpt\" (UniqueName: \"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt\") pod \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.902901 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data\") pod \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.902958 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle\") pod \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\" (UID: \"f0283b9a-4d82-4a68-9b49-31fde9eda7b6\") " Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.910521 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt" (OuterVolumeSpecName: "kube-api-access-8dwpt") pod "f0283b9a-4d82-4a68-9b49-31fde9eda7b6" (UID: "f0283b9a-4d82-4a68-9b49-31fde9eda7b6"). InnerVolumeSpecName "kube-api-access-8dwpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.934949 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data" (OuterVolumeSpecName: "config-data") pod "f0283b9a-4d82-4a68-9b49-31fde9eda7b6" (UID: "f0283b9a-4d82-4a68-9b49-31fde9eda7b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:13 crc kubenswrapper[4675]: I1125 12:50:13.953113 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0283b9a-4d82-4a68-9b49-31fde9eda7b6" (UID: "f0283b9a-4d82-4a68-9b49-31fde9eda7b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.005530 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dwpt\" (UniqueName: \"kubernetes.io/projected/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-kube-api-access-8dwpt\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.005574 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.005590 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0283b9a-4d82-4a68-9b49-31fde9eda7b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.194995 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.309637 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs\") pod \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.309760 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs\") pod \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.309827 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data\") pod \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.309876 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqjzw\" (UniqueName: \"kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw\") pod \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.309992 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle\") pod \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\" (UID: \"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5\") " Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.311351 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs" (OuterVolumeSpecName: "logs") pod "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" (UID: "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.317432 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw" (OuterVolumeSpecName: "kube-api-access-zqjzw") pod "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" (UID: "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5"). InnerVolumeSpecName "kube-api-access-zqjzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.379362 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" (UID: "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.393197 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" (UID: "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5"). 
InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.399999 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data" (OuterVolumeSpecName: "config-data") pod "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" (UID: "20b9d608-f69e-4ae9-9ff0-c185db9a2ec5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.406801 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0283b9a-4d82-4a68-9b49-31fde9eda7b6","Type":"ContainerDied","Data":"d623bdc4526c89600a360cad5d0e674ff5f3df913a6bd16c927d7b8b946c7352"} Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.407119 4675 scope.go:117] "RemoveContainer" containerID="6c0b9c311cbfad9008215ac118e67f931cdd3981e4d425b234b92350d528c5b1" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.407073 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.412931 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413021 4675 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413039 4675 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-logs\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413051 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413063 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqjzw\" (UniqueName: \"kubernetes.io/projected/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5-kube-api-access-zqjzw\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413882 4675 generic.go:334] "Generic (PLEG): container finished" podID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerID="64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543" exitCode=0 Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.413973 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerDied","Data":"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543"} Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.414305 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20b9d608-f69e-4ae9-9ff0-c185db9a2ec5","Type":"ContainerDied","Data":"01dfbbde2137bc367035098a1dc3b7a919c3f6f13207416d2f2cf2bf0fb07e04"} Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.414387 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.436357 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c" exitCode=0 Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.436399 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c"} Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.436424 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61"} Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.492700 4675 scope.go:117] "RemoveContainer" containerID="64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.503896 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.523878 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.544612 4675 scope.go:117] "RemoveContainer" containerID="2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.553270 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 12:50:14 crc kubenswrapper[4675]: E1125 12:50:14.553739 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.553761 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" Nov 25 12:50:14 crc kubenswrapper[4675]: E1125 12:50:14.553784 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerName="nova-scheduler-scheduler" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.553791 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerName="nova-scheduler-scheduler" Nov 25 12:50:14 crc kubenswrapper[4675]: E1125 12:50:14.553797 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.553804 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.554035 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-metadata" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.554058 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" containerName="nova-metadata-log" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.554084 4675 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" containerName="nova-scheduler-scheduler" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.554842 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.562932 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.575345 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.589545 4675 scope.go:117] "RemoveContainer" containerID="64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543" Nov 25 12:50:14 crc kubenswrapper[4675]: E1125 12:50:14.593309 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543\": container with ID starting with 64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543 not found: ID does not exist" containerID="64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.593361 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543"} err="failed to get container status \"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543\": rpc error: code = NotFound desc = could not find container \"64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543\": container with ID starting with 64c901ab43a4e93dc6f051b4f9b5a43d26c3fe5ca3a3fbbcdfc6d023b1b88543 not found: ID does not exist" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.593393 4675 scope.go:117] "RemoveContainer" containerID="2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.593978 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 12:50:14 crc kubenswrapper[4675]: E1125 12:50:14.595374 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a\": container with ID starting with 2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a not found: ID does not exist" containerID="2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.595423 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a"} err="failed to get container status \"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a\": rpc error: code = NotFound desc = could not find container \"2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a\": container with ID starting with 2ecec8dc9679e4ac21915722bc058ff3869170a094d9c0773bde5ccf90e9512a not found: ID does not exist" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.595443 4675 scope.go:117] "RemoveContainer" containerID="0197b926bc3d167c79e69a56309b09a9d11fe04e548d7c1fd85fdf36e1e96e54" Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.612135 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] 
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.623094 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.626042 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.629334 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.629532 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.634179 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.723783 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.723977 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzb9x\" (UniqueName: \"kubernetes.io/projected/431204a3-00f5-425a-b473-86f86e2bc600-kube-api-access-dzb9x\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724049 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-config-data\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724101 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724138 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724164 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-config-data\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724209 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3cab755-3df5-4cfb-880e-f842da175aeb-logs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.724300 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9pcn\" (UniqueName: \"kubernetes.io/projected/f3cab755-3df5-4cfb-880e-f842da175aeb-kube-api-access-l9pcn\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826302 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-config-data\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826347 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3cab755-3df5-4cfb-880e-f842da175aeb-logs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826416 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9pcn\" (UniqueName: \"kubernetes.io/projected/f3cab755-3df5-4cfb-880e-f842da175aeb-kube-api-access-l9pcn\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826445 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826530 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzb9x\" (UniqueName: \"kubernetes.io/projected/431204a3-00f5-425a-b473-86f86e2bc600-kube-api-access-dzb9x\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826575 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-config-data\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826619 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826648 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.826955 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3cab755-3df5-4cfb-880e-f842da175aeb-logs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.830961 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-config-data\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.831836 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.831915 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.832365 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/431204a3-00f5-425a-b473-86f86e2bc600-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.834450 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3cab755-3df5-4cfb-880e-f842da175aeb-config-data\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.847710 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9pcn\" (UniqueName: \"kubernetes.io/projected/f3cab755-3df5-4cfb-880e-f842da175aeb-kube-api-access-l9pcn\") pod \"nova-metadata-0\" (UID: \"f3cab755-3df5-4cfb-880e-f842da175aeb\") " pod="openstack/nova-metadata-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.847934 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzb9x\" (UniqueName: \"kubernetes.io/projected/431204a3-00f5-425a-b473-86f86e2bc600-kube-api-access-dzb9x\") pod \"nova-scheduler-0\" (UID: \"431204a3-00f5-425a-b473-86f86e2bc600\") " pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.890804 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 12:50:14 crc kubenswrapper[4675]: I1125 12:50:14.949734 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 12:50:15 crc kubenswrapper[4675]: I1125 12:50:15.422315 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 12:50:15 crc kubenswrapper[4675]: I1125 12:50:15.465195 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"431204a3-00f5-425a-b473-86f86e2bc600","Type":"ContainerStarted","Data":"3cc3745b596443e7c8658edfd2db5ecfe9447aaa07516ebebf7039fecf468de3"}
Nov 25 12:50:15 crc kubenswrapper[4675]: I1125 12:50:15.515880 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 12:50:15 crc kubenswrapper[4675]: W1125 12:50:15.525508 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3cab755_3df5_4cfb_880e_f842da175aeb.slice/crio-5c9289303204a2baf1adafddb04900f82889878d4db4489849d65a49253969fe WatchSource:0}: Error finding container 5c9289303204a2baf1adafddb04900f82889878d4db4489849d65a49253969fe: Status 404 returned error can't find the container with id 5c9289303204a2baf1adafddb04900f82889878d4db4489849d65a49253969fe
Nov 25 12:50:15 crc kubenswrapper[4675]: I1125 12:50:15.563726 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b9d608-f69e-4ae9-9ff0-c185db9a2ec5" path="/var/lib/kubelet/pods/20b9d608-f69e-4ae9-9ff0-c185db9a2ec5/volumes"
Nov 25 12:50:15 crc kubenswrapper[4675]: I1125 12:50:15.565199 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0283b9a-4d82-4a68-9b49-31fde9eda7b6" path="/var/lib/kubelet/pods/f0283b9a-4d82-4a68-9b49-31fde9eda7b6/volumes"
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.478326 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3cab755-3df5-4cfb-880e-f842da175aeb","Type":"ContainerStarted","Data":"c8aae23671a34a99b165cf987e27340c7ce62d2897b3eda359a569c0df3dab89"}
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.478792 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3cab755-3df5-4cfb-880e-f842da175aeb","Type":"ContainerStarted","Data":"3a0601eac0ca4a14d0dbacd8e77c0881602d5366cfbd54189b73e9f53cae92d3"}
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.478805 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3cab755-3df5-4cfb-880e-f842da175aeb","Type":"ContainerStarted","Data":"5c9289303204a2baf1adafddb04900f82889878d4db4489849d65a49253969fe"}
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.479920 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"431204a3-00f5-425a-b473-86f86e2bc600","Type":"ContainerStarted","Data":"ff3fb4bfff5b5e53568962b5f606d4fdb1e58e78ce53d18d76f9614c185e20a1"}
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.505221 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.505202322 podStartE2EDuration="2.505202322s" podCreationTimestamp="2025-11-25 12:50:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:50:16.493170483 +0000 UTC m=+1361.664762834" watchObservedRunningTime="2025-11-25 12:50:16.505202322 +0000 UTC m=+1361.676794663"
Nov 25 12:50:16 crc kubenswrapper[4675]: I1125 12:50:16.515503 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.515482676 podStartE2EDuration="2.515482676s" podCreationTimestamp="2025-11-25 12:50:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:50:16.506417061 +0000 UTC m=+1361.678009402" watchObservedRunningTime="2025-11-25 12:50:16.515482676 +0000 UTC m=+1361.687075017"
Nov 25 12:50:19 crc kubenswrapper[4675]: I1125 12:50:19.890910 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 12:50:19 crc kubenswrapper[4675]: I1125 12:50:19.951198 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 12:50:19 crc kubenswrapper[4675]: I1125 12:50:19.951333 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 12:50:20 crc kubenswrapper[4675]: I1125 12:50:20.723684 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 12:50:20 crc kubenswrapper[4675]: I1125 12:50:20.724009 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 12:50:21 crc kubenswrapper[4675]: I1125 12:50:21.772062 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cf087269-8e7f-416e-9492-b3ccb72f40d0" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:50:21 crc kubenswrapper[4675]: I1125 12:50:21.772062 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cf087269-8e7f-416e-9492-b3ccb72f40d0" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:50:24 crc kubenswrapper[4675]: I1125 12:50:24.891897 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 12:50:24 crc kubenswrapper[4675]: I1125 12:50:24.919099 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 12:50:24 crc kubenswrapper[4675]: I1125 12:50:24.950846 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 12:50:24 crc kubenswrapper[4675]: I1125 12:50:24.950904 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 12:50:25 crc kubenswrapper[4675]: I1125 12:50:25.602871 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 12:50:25 crc kubenswrapper[4675]: I1125 12:50:25.964115 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f3cab755-3df5-4cfb-880e-f842da175aeb" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.206:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:50:25 crc kubenswrapper[4675]: I1125 12:50:25.964332 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f3cab755-3df5-4cfb-880e-f842da175aeb" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.206:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 12:50:29 crc kubenswrapper[4675]: I1125 12:50:29.934953 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 25 12:50:30 crc kubenswrapper[4675]: I1125 12:50:30.729185 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 12:50:30 crc kubenswrapper[4675]: I1125 12:50:30.729843 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 12:50:30 crc kubenswrapper[4675]: I1125 12:50:30.738177 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 12:50:30 crc kubenswrapper[4675]: I1125 12:50:30.745326 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 12:50:31 crc kubenswrapper[4675]: I1125 12:50:31.622438 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 12:50:31 crc kubenswrapper[4675]: I1125 12:50:31.630042 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 12:50:34 crc kubenswrapper[4675]: I1125 12:50:34.955409 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 12:50:34 crc kubenswrapper[4675]: I1125 12:50:34.955873 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 12:50:34 crc kubenswrapper[4675]: I1125 12:50:34.962330 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 12:50:34 crc kubenswrapper[4675]: I1125 12:50:34.962735 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.438369 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.445078 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.482800 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.503467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.505580 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.505730 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvk2m\" (UniqueName: \"kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.608149 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.608288 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.608339 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvk2m\" (UniqueName: \"kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.608853 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.608969 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.635149 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvk2m\" (UniqueName: \"kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m\") pod \"redhat-marketplace-zhf9f\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") " pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:39 crc kubenswrapper[4675]: I1125 12:50:39.802541 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:40 crc kubenswrapper[4675]: I1125 12:50:40.305300 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:40 crc kubenswrapper[4675]: I1125 12:50:40.710187 4675 generic.go:334] "Generic (PLEG): container finished" podID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerID="5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404" exitCode=0
Nov 25 12:50:40 crc kubenswrapper[4675]: I1125 12:50:40.710237 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerDied","Data":"5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404"}
Nov 25 12:50:40 crc kubenswrapper[4675]: I1125 12:50:40.710269 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerStarted","Data":"23d1616f223c3b89e51a60c467892a28ba9d70be0e7e181c36a7557920b9fb67"}
Nov 25 12:50:42 crc kubenswrapper[4675]: I1125 12:50:42.735729 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerStarted","Data":"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"}
Nov 25 12:50:43 crc kubenswrapper[4675]: I1125 12:50:43.195847 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 12:50:43 crc kubenswrapper[4675]: I1125 12:50:43.747343 4675 generic.go:334] "Generic (PLEG): container finished" podID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerID="5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d" exitCode=0
Nov 25 12:50:43 crc kubenswrapper[4675]: I1125 12:50:43.747398 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerDied","Data":"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"}
Nov 25 12:50:44 crc kubenswrapper[4675]: I1125 12:50:44.335345 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 12:50:44 crc kubenswrapper[4675]: I1125 12:50:44.759365 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerStarted","Data":"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"}
Nov 25 12:50:44 crc kubenswrapper[4675]: I1125 12:50:44.789510 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zhf9f" podStartSLOduration=2.010418429 podStartE2EDuration="5.789487801s" podCreationTimestamp="2025-11-25 12:50:39 +0000 UTC" firstStartedPulling="2025-11-25 12:50:40.712154107 +0000 UTC m=+1385.883746448" lastFinishedPulling="2025-11-25 12:50:44.491223479 +0000 UTC m=+1389.662815820" observedRunningTime="2025-11-25 12:50:44.778579056 +0000 UTC m=+1389.950171407" watchObservedRunningTime="2025-11-25 12:50:44.789487801 +0000 UTC m=+1389.961080142"
Nov 25 12:50:48 crc kubenswrapper[4675]: I1125 12:50:48.538671 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="rabbitmq" containerID="cri-o://9ec7f903a2f764b8fd62426ad761cb8d54f5cf6f3d514ea6503adcdc93798c10" gracePeriod=604795
Nov 25 12:50:49 crc kubenswrapper[4675]: I1125 12:50:49.803521 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:49 crc kubenswrapper[4675]: I1125 12:50:49.803861 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:49 crc kubenswrapper[4675]: I1125 12:50:49.854705 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:49 crc kubenswrapper[4675]: I1125 12:50:49.884444 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="rabbitmq" containerID="cri-o://eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a" gracePeriod=604795
Nov 25 12:50:50 crc kubenswrapper[4675]: I1125 12:50:50.881873 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:50 crc kubenswrapper[4675]: I1125 12:50:50.931921 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:52 crc kubenswrapper[4675]: I1125 12:50:52.312849 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused"
Nov 25 12:50:52 crc kubenswrapper[4675]: I1125 12:50:52.709007 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused"
Nov 25 12:50:52 crc kubenswrapper[4675]: I1125 12:50:52.852220 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zhf9f" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="registry-server" containerID="cri-o://10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c" gracePeriod=2
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.415535 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.492522 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities\") pod \"c6c063ab-bca4-4904-98ed-b9079e6030a8\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") "
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.492601 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content\") pod \"c6c063ab-bca4-4904-98ed-b9079e6030a8\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") "
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.492770 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvk2m\" (UniqueName: \"kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m\") pod \"c6c063ab-bca4-4904-98ed-b9079e6030a8\" (UID: \"c6c063ab-bca4-4904-98ed-b9079e6030a8\") "
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.493286 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities" (OuterVolumeSpecName: "utilities") pod "c6c063ab-bca4-4904-98ed-b9079e6030a8" (UID: "c6c063ab-bca4-4904-98ed-b9079e6030a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.499755 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m" (OuterVolumeSpecName: "kube-api-access-hvk2m") pod "c6c063ab-bca4-4904-98ed-b9079e6030a8" (UID: "c6c063ab-bca4-4904-98ed-b9079e6030a8"). InnerVolumeSpecName "kube-api-access-hvk2m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.510672 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6c063ab-bca4-4904-98ed-b9079e6030a8" (UID: "c6c063ab-bca4-4904-98ed-b9079e6030a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.594799 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.594852 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c063ab-bca4-4904-98ed-b9079e6030a8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.594866 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvk2m\" (UniqueName: \"kubernetes.io/projected/c6c063ab-bca4-4904-98ed-b9079e6030a8-kube-api-access-hvk2m\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.863783 4675 generic.go:334] "Generic (PLEG): container finished" podID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerID="10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c" exitCode=0
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.863856 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerDied","Data":"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"}
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.863895 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zhf9f" event={"ID":"c6c063ab-bca4-4904-98ed-b9079e6030a8","Type":"ContainerDied","Data":"23d1616f223c3b89e51a60c467892a28ba9d70be0e7e181c36a7557920b9fb67"}
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.863894 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zhf9f"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.863915 4675 scope.go:117] "RemoveContainer" containerID="10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.890546 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.892413 4675 scope.go:117] "RemoveContainer" containerID="5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.900529 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zhf9f"]
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.920484 4675 scope.go:117] "RemoveContainer" containerID="5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.963178 4675 scope.go:117] "RemoveContainer" containerID="10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"
Nov 25 12:50:53 crc kubenswrapper[4675]: E1125 12:50:53.963680 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c\": container with ID starting with 10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c not found: ID does not exist" containerID="10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.963720 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c"} err="failed to get container status \"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c\": rpc error: code = NotFound desc = could not find container \"10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c\": container with ID starting with 10264ecd98a4bd445364f00c8fba75aa826f309f90d7aca711316eb0ba35b63c not found: ID does not exist"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.963746 4675 scope.go:117] "RemoveContainer" containerID="5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"
Nov 25 12:50:53 crc kubenswrapper[4675]: E1125 12:50:53.964134 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d\": container with ID starting with 5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d not found: ID does not exist" containerID="5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.964159 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d"} err="failed to get container status \"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d\": rpc error: code = NotFound desc = could not find container \"5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d\": container with ID starting with 5acc955ed6a2a0f55159783c58019a91dea6d62681f8f77aed17054faff6983d not found: ID does not exist"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.964175 4675 scope.go:117] "RemoveContainer" containerID="5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404"
Nov 25 12:50:53 crc kubenswrapper[4675]: E1125 12:50:53.968711 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404\": container with ID starting with 5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404 not found: ID does not exist" containerID="5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404"
Nov 25 12:50:53 crc kubenswrapper[4675]: I1125 12:50:53.968751 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404"} err="failed to get container status \"5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404\": rpc error: code = NotFound desc = could not find container \"5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404\": container with ID starting with 5431a554970e53e4319699ed30fe7438390c8084beeec54f70d8ea561e417404 not found: ID does not exist"
Nov 25 12:50:54 crc kubenswrapper[4675]: I1125 12:50:54.892250 4675 generic.go:334] "Generic (PLEG): container finished" podID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerID="9ec7f903a2f764b8fd62426ad761cb8d54f5cf6f3d514ea6503adcdc93798c10" exitCode=0
Nov 25 12:50:54 crc kubenswrapper[4675]: I1125 12:50:54.892563 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerDied","Data":"9ec7f903a2f764b8fd62426ad761cb8d54f5cf6f3d514ea6503adcdc93798c10"}
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.065001 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.120868 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.120934 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjt7z\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.120986 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121087 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121103 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121132 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121149 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121182 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121232 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121294 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.121338 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd\") pod \"24ebc7c8-8b87-487b-90cb-7c26a047b956\" (UID: \"24ebc7c8-8b87-487b-90cb-7c26a047b956\") "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.134993 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.135446 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.138455 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.176168 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "persistence") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.187659 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.212201 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info" (OuterVolumeSpecName: "pod-info") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.212217 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.234485 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z" (OuterVolumeSpecName: "kube-api-access-cjt7z") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "kube-api-access-cjt7z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244398 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244446 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244461 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjt7z\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-kube-api-access-cjt7z\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244485 4675 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244543 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244557 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244571 4675 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24ebc7c8-8b87-487b-90cb-7c26a047b956-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.244585 4675 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24ebc7c8-8b87-487b-90cb-7c26a047b956-pod-info\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.278385 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data" (OuterVolumeSpecName: "config-data") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.303935 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf" (OuterVolumeSpecName: "server-conf") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.310758 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.346109 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.346327 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.346337 4675 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24ebc7c8-8b87-487b-90cb-7c26a047b956-server-conf\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.352931 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "24ebc7c8-8b87-487b-90cb-7c26a047b956" (UID: "24ebc7c8-8b87-487b-90cb-7c26a047b956"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.447668 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24ebc7c8-8b87-487b-90cb-7c26a047b956-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.544237 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" path="/var/lib/kubelet/pods/c6c063ab-bca4-4904-98ed-b9079e6030a8/volumes"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.906030 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"24ebc7c8-8b87-487b-90cb-7c26a047b956","Type":"ContainerDied","Data":"436a587a89fafc68b79c485ef5fc56d0e6689bbc1f404f1d8f2d3acaae1f5c7b"}
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.906088 4675 scope.go:117] "RemoveContainer" containerID="9ec7f903a2f764b8fd62426ad761cb8d54f5cf6f3d514ea6503adcdc93798c10"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.906092 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.935887 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.944002 4675 scope.go:117] "RemoveContainer" containerID="9172f56922c897a590a4b50e195c278eae03e0cde37d71e1e27ae75d11b847eb"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.946492 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.971596 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 12:50:55 crc kubenswrapper[4675]: E1125 12:50:55.971973 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="extract-utilities"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.971990 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="extract-utilities"
Nov 25 12:50:55 crc kubenswrapper[4675]: E1125 12:50:55.972011 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="setup-container"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972017 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="setup-container"
Nov 25 12:50:55 crc kubenswrapper[4675]: E1125 12:50:55.972024 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="rabbitmq"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972030 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="rabbitmq"
Nov 25 12:50:55 crc kubenswrapper[4675]: E1125 12:50:55.972050 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="extract-content"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972056 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="extract-content"
Nov 25 12:50:55 crc kubenswrapper[4675]: E1125 12:50:55.972071 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="registry-server"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972076 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="registry-server"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972243 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" containerName="rabbitmq"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.972266 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6c063ab-bca4-4904-98ed-b9079e6030a8" containerName="registry-server"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.973171 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.979023 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.979041 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.980547 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.984579 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.985500 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.985657 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.985992 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xgbxq"
Nov 25 12:50:55 crc kubenswrapper[4675]: I1125 12:50:55.998010 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060276 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0"
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060409 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0"
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060438 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0"
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060456 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0"
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060485 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0"
Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060514 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName:
\"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060533 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m76s\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-kube-api-access-2m76s\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060607 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060660 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060727 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.060776 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162607 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162657 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m76s\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-kube-api-access-2m76s\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162688 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162722 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: 
\"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162769 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162799 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162864 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162948 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.162983 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.163005 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.163041 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.163685 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.164073 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.164487 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.164959 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.165014 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.166607 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.168050 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.170213 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.171504 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.176910 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.184940 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m76s\" (UniqueName: \"kubernetes.io/projected/aa5e2576-e3fb-44a7-83ad-6193b6437ae0-kube-api-access-2m76s\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.209169 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"aa5e2576-e3fb-44a7-83ad-6193b6437ae0\") " pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.302156 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.488154 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.608778 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd8gq\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.608932 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.608997 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609037 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609077 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609124 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609155 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609188 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609226 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609275 4675 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.609297 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\" (UID: \"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b\") " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.625693 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.628346 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.631967 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.636676 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.640990 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.642344 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq" (OuterVolumeSpecName: "kube-api-access-qd8gq") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "kube-api-access-qd8gq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.645196 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.657861 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info" (OuterVolumeSpecName: "pod-info") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.679350 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data" (OuterVolumeSpecName: "config-data") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.711793 4675 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713042 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713086 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713109 4675 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713119 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713128 4675 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713137 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713163 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.713174 
4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd8gq\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-kube-api-access-qd8gq\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.724104 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf" (OuterVolumeSpecName: "server-conf") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.734063 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.764656 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" (UID: "ca5edac0-6e16-45e1-8d9e-8cec8479bd8b"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.815231 4675 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.815265 4675 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.815279 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.917122 4675 generic.go:334] "Generic (PLEG): container finished" podID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerID="eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a" exitCode=0 Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.917206 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerDied","Data":"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a"} Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.917237 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca5edac0-6e16-45e1-8d9e-8cec8479bd8b","Type":"ContainerDied","Data":"2d0edc35a9d49ec84a82f7b5afcfd6d0e99c9756323145b1f5c77de414c037a7"} Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.917258 4675 scope.go:117] "RemoveContainer" containerID="eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.917373 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.958132 4675 scope.go:117] "RemoveContainer" containerID="440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a" Nov 25 12:50:56 crc kubenswrapper[4675]: I1125 12:50:56.992803 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.017688 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.018024 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:50:57 crc kubenswrapper[4675]: E1125 12:50:57.018346 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="rabbitmq" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.018357 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="rabbitmq" Nov 25 12:50:57 crc kubenswrapper[4675]: E1125 12:50:57.018374 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="setup-container" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.018381 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="setup-container" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.018571 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" containerName="rabbitmq" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.019760 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.049902 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050153 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050274 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050394 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6mcdr" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050535 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050659 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.050858 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.069667 4675 scope.go:117] "RemoveContainer" containerID="eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a" Nov 25 12:50:57 crc kubenswrapper[4675]: E1125 12:50:57.072024 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a\": container with ID starting with eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a not found: ID does not exist" containerID="eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.072066 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a"} err="failed to get container status \"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a\": rpc error: code = NotFound desc = could not find container \"eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a\": container with ID starting with eb0676f275f2de6bebe31763a0c52b9d0ef5dcc9accf5e172141b6c72b5c063a not found: ID does not exist" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.072104 4675 scope.go:117] "RemoveContainer" containerID="440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a" Nov 25 12:50:57 crc kubenswrapper[4675]: E1125 12:50:57.074965 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a\": container with ID starting with 440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a not found: ID does not exist" containerID="440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.074997 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a"} err="failed to get container status \"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a\": rpc error: code = NotFound desc = could not find 
container \"440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a\": container with ID starting with 440c36f01b5d7a4fad492ad04bb91bdfa5b41dd8fc34ee2ffe1a6004a38f642a not found: ID does not exist" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.075435 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.083247 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.137865 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.137970 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138118 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138190 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138216 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138645 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgpb8\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-kube-api-access-fgpb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138752 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13f3fc9e-df33-4016-8d7e-a40112cdc27f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138791 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138880 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.138936 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13f3fc9e-df33-4016-8d7e-a40112cdc27f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.139004 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240159 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240231 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240276 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240300 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240317 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240354 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgpb8\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-kube-api-access-fgpb8\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240396 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13f3fc9e-df33-4016-8d7e-a40112cdc27f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240432 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240479 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240494 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13f3fc9e-df33-4016-8d7e-a40112cdc27f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.240528 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.241238 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.241780 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.242771 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.245100 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc 
kubenswrapper[4675]: I1125 12:50:57.245256 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.245802 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13f3fc9e-df33-4016-8d7e-a40112cdc27f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.249083 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13f3fc9e-df33-4016-8d7e-a40112cdc27f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.249562 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13f3fc9e-df33-4016-8d7e-a40112cdc27f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.250143 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.261964 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.263383 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgpb8\" (UniqueName: \"kubernetes.io/projected/13f3fc9e-df33-4016-8d7e-a40112cdc27f-kube-api-access-fgpb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.284498 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13f3fc9e-df33-4016-8d7e-a40112cdc27f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.471044 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.545561 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24ebc7c8-8b87-487b-90cb-7c26a047b956" path="/var/lib/kubelet/pods/24ebc7c8-8b87-487b-90cb-7c26a047b956/volumes"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.548207 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca5edac0-6e16-45e1-8d9e-8cec8479bd8b" path="/var/lib/kubelet/pods/ca5edac0-6e16-45e1-8d9e-8cec8479bd8b/volumes"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.709640 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.716344 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.722527 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.852171 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.852559 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.852601 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9tf2\" (UniqueName: \"kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.926630 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.935063 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa5e2576-e3fb-44a7-83ad-6193b6437ae0","Type":"ContainerStarted","Data":"8b9ce87adf483dd949e92ea026b0b245a3bdf7368e128e286d5304389d59701d"}
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.954474 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.954621 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.954658 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9tf2\" (UniqueName: \"kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.954886 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.955408 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:57 crc kubenswrapper[4675]: I1125 12:50:57.971916 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9tf2\" (UniqueName: \"kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2\") pod \"redhat-operators-ql2wz\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") " pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.035700 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.558193 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:50:58 crc kubenswrapper[4675]: W1125 12:50:58.652067 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19d77559_bdf5_466a_9189_0c1e894fac2c.slice/crio-11b5c133b639b8ec42ded33fe924837308c2f1ab0e692cc1c976a165e524e0c8 WatchSource:0}: Error finding container 11b5c133b639b8ec42ded33fe924837308c2f1ab0e692cc1c976a165e524e0c8: Status 404 returned error can't find the container with id 11b5c133b639b8ec42ded33fe924837308c2f1ab0e692cc1c976a165e524e0c8
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.946838 4675 generic.go:334] "Generic (PLEG): container finished" podID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerID="df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51" exitCode=0
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.946925 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerDied","Data":"df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51"}
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.947207 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerStarted","Data":"11b5c133b639b8ec42ded33fe924837308c2f1ab0e692cc1c976a165e524e0c8"}
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.949349 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13f3fc9e-df33-4016-8d7e-a40112cdc27f","Type":"ContainerStarted","Data":"2f62a67d137e7853ff0fe5afbdce876b30a109bb3dcf43fa9ed4423d53e227a9"}
Nov 25 12:50:58 crc kubenswrapper[4675]: I1125 12:50:58.951740 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa5e2576-e3fb-44a7-83ad-6193b6437ae0","Type":"ContainerStarted","Data":"3cc0aca47dd2cbe8cbce82727299688b27797ec1beb6f52d7fac6729e55082fe"}
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.416589 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.418367 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.421500 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.428612 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481547 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481639 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481690 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481721 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481862 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481896 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqwkz\" (UniqueName: \"kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.481963 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583084 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583139 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583218 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583241 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqwkz\" (UniqueName: \"kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583312 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583347 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.583391 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.584984 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.585521 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.586022 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.586137 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.586249 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.586421 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.603885 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqwkz\" (UniqueName: \"kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz\") pod \"dnsmasq-dns-79bd4cc8c9-bl9s6\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.742601 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:50:59 crc kubenswrapper[4675]: I1125 12:50:59.977583 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13f3fc9e-df33-4016-8d7e-a40112cdc27f","Type":"ContainerStarted","Data":"e80f7fc85b16aaaae8c6ac2c777c61784c0fbdc44b6ae4750a03b9f9fb69bae0"}
Nov 25 12:51:00 crc kubenswrapper[4675]: I1125 12:51:00.280587 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:51:00 crc kubenswrapper[4675]: I1125 12:51:00.986308 4675 generic.go:334] "Generic (PLEG): container finished" podID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerID="89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1" exitCode=0
Nov 25 12:51:00 crc kubenswrapper[4675]: I1125 12:51:00.986355 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" event={"ID":"5e95eb38-5014-4433-a87c-c3b50881d25b","Type":"ContainerDied","Data":"89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1"}
Nov 25 12:51:00 crc kubenswrapper[4675]: I1125 12:51:00.986652 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" event={"ID":"5e95eb38-5014-4433-a87c-c3b50881d25b","Type":"ContainerStarted","Data":"3df77818c51657cfe4c608be88860208c6f0f04f0a76ceb431b6a3e1f8e9e624"}
Nov 25 12:51:00 crc kubenswrapper[4675]: I1125 12:51:00.989547 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerStarted","Data":"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"}
Nov 25 12:51:02 crc kubenswrapper[4675]: I1125 12:51:02.004596 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" event={"ID":"5e95eb38-5014-4433-a87c-c3b50881d25b","Type":"ContainerStarted","Data":"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"}
Nov 25 12:51:02 crc kubenswrapper[4675]: I1125 12:51:02.005096 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:51:02 crc kubenswrapper[4675]: I1125 12:51:02.032437 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" podStartSLOduration=3.03241553 podStartE2EDuration="3.03241553s" podCreationTimestamp="2025-11-25 12:50:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:51:02.02684949 +0000 UTC m=+1407.198441861" watchObservedRunningTime="2025-11-25 12:51:02.03241553 +0000 UTC m=+1407.204007881"
Nov 25 12:51:06 crc kubenswrapper[4675]: I1125 12:51:06.042053 4675 generic.go:334] "Generic (PLEG): container finished" podID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerID="6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4" exitCode=0
Nov 25 12:51:06 crc kubenswrapper[4675]: I1125 12:51:06.042110 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerDied","Data":"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"}
Nov 25 12:51:07 crc kubenswrapper[4675]: I1125 12:51:07.055742 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerStarted","Data":"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"}
Nov 25 12:51:07 crc kubenswrapper[4675]: I1125 12:51:07.081519 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ql2wz" podStartSLOduration=2.587599427 podStartE2EDuration="10.081493269s" podCreationTimestamp="2025-11-25 12:50:57 +0000 UTC" firstStartedPulling="2025-11-25 12:50:58.948884069 +0000 UTC m=+1404.120476410" lastFinishedPulling="2025-11-25 12:51:06.442777911 +0000 UTC m=+1411.614370252" observedRunningTime="2025-11-25 12:51:07.077058073 +0000 UTC m=+1412.248650424" watchObservedRunningTime="2025-11-25 12:51:07.081493269 +0000 UTC m=+1412.253085620"
Nov 25 12:51:08 crc kubenswrapper[4675]: I1125 12:51:08.036064 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:08 crc kubenswrapper[4675]: I1125 12:51:08.036320 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:09 crc kubenswrapper[4675]: I1125 12:51:09.090122 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ql2wz" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="registry-server" probeResult="failure" output=<
Nov 25 12:51:09 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s
Nov 25 12:51:09 crc kubenswrapper[4675]: >
Nov 25 12:51:09 crc kubenswrapper[4675]: I1125 12:51:09.743778 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:51:09 crc kubenswrapper[4675]: I1125 12:51:09.806755 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"]
Nov 25 12:51:09 crc kubenswrapper[4675]: I1125 12:51:09.813724 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="dnsmasq-dns" containerID="cri-o://29cabc1a090e5abe9ba14cddf3e032467818d361d9c295db684261aadc195918" gracePeriod=10
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.054825 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-r9wsm"]
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.056331 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.087437 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-r9wsm"]
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.173095 4675 generic.go:334] "Generic (PLEG): container finished" podID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerID="29cabc1a090e5abe9ba14cddf3e032467818d361d9c295db684261aadc195918" exitCode=0
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.173137 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" event={"ID":"1c8edeb5-7208-4cd6-b861-03997e90e85c","Type":"ContainerDied","Data":"29cabc1a090e5abe9ba14cddf3e032467818d361d9c295db684261aadc195918"}
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177396 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8zhn\" (UniqueName: \"kubernetes.io/projected/0e609a3d-0025-458f-8086-595a9923a23d-kube-api-access-t8zhn\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177488 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-config\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177573 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177598 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177651 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177676 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.177722 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283529 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283575 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283625 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283646 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283689 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283735 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8zhn\" (UniqueName: \"kubernetes.io/projected/0e609a3d-0025-458f-8086-595a9923a23d-kube-api-access-t8zhn\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.283999 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-config\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.284941 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-config\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.285474 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.288328 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.289504 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.289584 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.289636 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e609a3d-0025-458f-8086-595a9923a23d-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.327855 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8zhn\" (UniqueName: \"kubernetes.io/projected/0e609a3d-0025-458f-8086-595a9923a23d-kube-api-access-t8zhn\") pod \"dnsmasq-dns-6cd9bffc9-r9wsm\" (UID: \"0e609a3d-0025-458f-8086-595a9923a23d\") " pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.391220 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.543070 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8"
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.692753 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.692876 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.693019 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.693048 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.693129 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.693165 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z24mw\" (UniqueName: \"kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw\") pod \"1c8edeb5-7208-4cd6-b861-03997e90e85c\" (UID: \"1c8edeb5-7208-4cd6-b861-03997e90e85c\") "
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.700754 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw" (OuterVolumeSpecName: "kube-api-access-z24mw") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "kube-api-access-z24mw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.770441 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.770461 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.787373 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.787440 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config" (OuterVolumeSpecName: "config") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.790270 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1c8edeb5-7208-4cd6-b861-03997e90e85c" (UID: "1c8edeb5-7208-4cd6-b861-03997e90e85c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795691 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795713 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795722 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795733 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z24mw\" (UniqueName: \"kubernetes.io/projected/1c8edeb5-7208-4cd6-b861-03997e90e85c-kube-api-access-z24mw\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795743 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.795752 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c8edeb5-7208-4cd6-b861-03997e90e85c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:10 crc kubenswrapper[4675]: I1125 12:51:10.946212 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-r9wsm"]
Nov 25 12:51:10 crc kubenswrapper[4675]: W1125 12:51:10.955281 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e609a3d_0025_458f_8086_595a9923a23d.slice/crio-ad73932b337c71db0ad28ab40c639c988111c1ee3c358fe3bce60d97bfb8d653 WatchSource:0}: Error finding container ad73932b337c71db0ad28ab40c639c988111c1ee3c358fe3bce60d97bfb8d653: Status 404 returned error can't find the container with id ad73932b337c71db0ad28ab40c639c988111c1ee3c358fe3bce60d97bfb8d653
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.184298 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm" event={"ID":"0e609a3d-0025-458f-8086-595a9923a23d","Type":"ContainerStarted","Data":"ad73932b337c71db0ad28ab40c639c988111c1ee3c358fe3bce60d97bfb8d653"}
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.189310 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8" event={"ID":"1c8edeb5-7208-4cd6-b861-03997e90e85c","Type":"ContainerDied","Data":"f235d92f402b70049be41649c22b1257e74271af0f6a1fc226cc625921dfcbf6"}
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.189387 4675 scope.go:117] "RemoveContainer" containerID="29cabc1a090e5abe9ba14cddf3e032467818d361d9c295db684261aadc195918"
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.189588 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-jzgb8"
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.209754 4675 scope.go:117] "RemoveContainer" containerID="e64ec1af788e77cf7a5a1c47d824d508fc2fdc8cb8d7a66046997ee786ec5336"
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.231961 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"]
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.249360 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-jzgb8"]
Nov 25 12:51:11 crc kubenswrapper[4675]: I1125 12:51:11.546055 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" path="/var/lib/kubelet/pods/1c8edeb5-7208-4cd6-b861-03997e90e85c/volumes"
Nov 25 12:51:12 crc kubenswrapper[4675]: I1125 12:51:12.204359 4675 generic.go:334] "Generic (PLEG): container finished" podID="0e609a3d-0025-458f-8086-595a9923a23d" containerID="cb5e7e184388b3e9b73c8267905db16cfd3c2a91ac39eff8fa3fc0b94d2380bf" exitCode=0
Nov 25 12:51:12 crc kubenswrapper[4675]: I1125 12:51:12.204433 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm" event={"ID":"0e609a3d-0025-458f-8086-595a9923a23d","Type":"ContainerDied","Data":"cb5e7e184388b3e9b73c8267905db16cfd3c2a91ac39eff8fa3fc0b94d2380bf"}
Nov 25 12:51:13 crc kubenswrapper[4675]: I1125 12:51:13.214665 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm" event={"ID":"0e609a3d-0025-458f-8086-595a9923a23d","Type":"ContainerStarted","Data":"3521f096ac3e295c4819e35ddb55f9da83df8b92db2cfa549ee47bdbb62e66a4"}
Nov 25 12:51:13 crc kubenswrapper[4675]: I1125 12:51:13.215154 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:13 crc kubenswrapper[4675]: I1125 12:51:13.235868 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm" podStartSLOduration=3.235847691 podStartE2EDuration="3.235847691s" podCreationTimestamp="2025-11-25 12:51:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:51:13.231450106 +0000 UTC m=+1418.403042467" watchObservedRunningTime="2025-11-25 12:51:13.235847691 +0000 UTC m=+1418.407440032"
Nov 25 12:51:18 crc kubenswrapper[4675]: I1125 12:51:18.086957 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:18 crc kubenswrapper[4675]: I1125 12:51:18.141599 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:18 crc kubenswrapper[4675]: I1125 12:51:18.332499 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.269026 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ql2wz" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="registry-server" containerID="cri-o://f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec" gracePeriod=2
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.724904 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.768726 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities\") pod \"19d77559-bdf5-466a-9189-0c1e894fac2c\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") "
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.768908 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content\") pod \"19d77559-bdf5-466a-9189-0c1e894fac2c\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") "
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.768958 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9tf2\" (UniqueName: \"kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2\") pod \"19d77559-bdf5-466a-9189-0c1e894fac2c\" (UID: \"19d77559-bdf5-466a-9189-0c1e894fac2c\") "
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.777470 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities" (OuterVolumeSpecName: "utilities") pod "19d77559-bdf5-466a-9189-0c1e894fac2c" (UID: "19d77559-bdf5-466a-9189-0c1e894fac2c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.779060 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2" (OuterVolumeSpecName: "kube-api-access-c9tf2") pod "19d77559-bdf5-466a-9189-0c1e894fac2c" (UID: "19d77559-bdf5-466a-9189-0c1e894fac2c"). InnerVolumeSpecName "kube-api-access-c9tf2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.867518 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19d77559-bdf5-466a-9189-0c1e894fac2c" (UID: "19d77559-bdf5-466a-9189-0c1e894fac2c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.871252 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.871291 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19d77559-bdf5-466a-9189-0c1e894fac2c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:19 crc kubenswrapper[4675]: I1125 12:51:19.871329 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9tf2\" (UniqueName: \"kubernetes.io/projected/19d77559-bdf5-466a-9189-0c1e894fac2c-kube-api-access-c9tf2\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.283672 4675 generic.go:334] "Generic (PLEG): container finished" podID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerID="f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec" exitCode=0
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.283728 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerDied","Data":"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"}
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.283753 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ql2wz"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.283770 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ql2wz" event={"ID":"19d77559-bdf5-466a-9189-0c1e894fac2c","Type":"ContainerDied","Data":"11b5c133b639b8ec42ded33fe924837308c2f1ab0e692cc1c976a165e524e0c8"}
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.283791 4675 scope.go:117] "RemoveContainer" containerID="f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.319064 4675 scope.go:117] "RemoveContainer" containerID="6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.319452 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.328876 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ql2wz"]
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.342301 4675 scope.go:117] "RemoveContainer" containerID="df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.395975 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd9bffc9-r9wsm"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.403066 4675 scope.go:117] "RemoveContainer" containerID="f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"
Nov 25 12:51:20 crc kubenswrapper[4675]: E1125 12:51:20.403593 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec\": container with ID starting with f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec not found: ID does not exist" containerID="f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.403623 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec"} err="failed to get container status \"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec\": rpc error: code = NotFound desc = could not find container \"f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec\": container with ID starting with f08b9592381659c5c4fc19a45ee8e99f9c86086a685395c7bd096c23ddd07aec not found: ID does not exist"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.403641 4675 scope.go:117] "RemoveContainer" containerID="6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"
Nov 25 12:51:20 crc kubenswrapper[4675]: E1125 12:51:20.404397 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4\": container with ID starting with 6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4 not found: ID does not exist" containerID="6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.404424 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4"} err="failed to get container status \"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4\": rpc error: code = NotFound desc = could not find container \"6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4\": container with ID starting with 6eb02bf67bb1c86271d0c89e56dac809006ab76957dc97449dcfda9cca4c88a4 not found: ID does not exist"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.404439 4675 scope.go:117] "RemoveContainer" containerID="df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51"
Nov 25 12:51:20 crc kubenswrapper[4675]: E1125 12:51:20.404664 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51\": container with ID starting with df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51 not found: ID does not exist" containerID="df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.404685 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51"} err="failed to get container status \"df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51\": rpc error: code = NotFound desc = could not find container \"df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51\": container with ID starting with df480206d28f5f2f1621ccd00c01a2c5f0aef28cac35e6584d9e0989bbed7a51 not found: ID does not exist"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.462156 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.462404 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="dnsmasq-dns" containerID="cri-o://82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc" gracePeriod=10
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.943491 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993076 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993515 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqwkz\" (UniqueName: \"kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993577 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993603 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993633 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993713 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:20 crc kubenswrapper[4675]: I1125 12:51:20.993736 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb\") pod \"5e95eb38-5014-4433-a87c-c3b50881d25b\" (UID: \"5e95eb38-5014-4433-a87c-c3b50881d25b\") "
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.002562 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz" (OuterVolumeSpecName: "kube-api-access-bqwkz") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "kube-api-access-bqwkz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.059213 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.064867 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.069282 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.079332 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config" (OuterVolumeSpecName: "config") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.089770 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.093278 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5e95eb38-5014-4433-a87c-c3b50881d25b" (UID: "5e95eb38-5014-4433-a87c-c3b50881d25b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096103 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096127 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqwkz\" (UniqueName: \"kubernetes.io/projected/5e95eb38-5014-4433-a87c-c3b50881d25b-kube-api-access-bqwkz\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096139 4675 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-config\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096147 4675 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096156 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096163 4675 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.096172 4675 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e95eb38-5014-4433-a87c-c3b50881d25b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.297725 4675 generic.go:334] "Generic (PLEG): container finished" podID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerID="82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc" exitCode=0
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.297769 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" event={"ID":"5e95eb38-5014-4433-a87c-c3b50881d25b","Type":"ContainerDied","Data":"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"}
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.297827 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6" event={"ID":"5e95eb38-5014-4433-a87c-c3b50881d25b","Type":"ContainerDied","Data":"3df77818c51657cfe4c608be88860208c6f0f04f0a76ceb431b6a3e1f8e9e624"}
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.297866 4675 scope.go:117] "RemoveContainer" containerID="82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.297986 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.336644 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.339827 4675 scope.go:117] "RemoveContainer" containerID="89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.346727 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-bl9s6"]
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.372788 4675 scope.go:117] "RemoveContainer" containerID="82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"
Nov 25 12:51:21 crc kubenswrapper[4675]: E1125 12:51:21.373518 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc\": container with ID starting with 82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc not found: ID does not exist" containerID="82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.373557 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc"} err="failed to get container status \"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc\": rpc error: code = NotFound desc = could not find container \"82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc\": container with ID starting with 82c3ab7f29f426b68da911367abeb0355f97c4018779cf116f9944f7820ed6dc not found: ID does not exist"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.373583 4675 scope.go:117] "RemoveContainer" containerID="89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1"
Nov 25 12:51:21 crc kubenswrapper[4675]: E1125 12:51:21.375075 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1\": container with ID starting with 89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1 not found: ID does not exist" containerID="89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.375132 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1"} err="failed to get container status \"89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1\": rpc error: code = NotFound desc = could not find container \"89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1\": container with ID starting with 89f58cffa9f03fe4bfdba4f729bdd4a499db7bc1065e5e013520a0b4cffb8bb1 not found: ID does not exist"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.543453 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" path="/var/lib/kubelet/pods/19d77559-bdf5-466a-9189-0c1e894fac2c/volumes"
Nov 25 12:51:21 crc kubenswrapper[4675]: I1125 12:51:21.544411 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" path="/var/lib/kubelet/pods/5e95eb38-5014-4433-a87c-c3b50881d25b/volumes"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.633899 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-87bnh"]
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636096 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="init"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636132 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="init"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636158 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="extract-content"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636171 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="extract-content"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636187 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="registry-server"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636194 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="registry-server"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636208 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="extract-utilities"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636215 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="extract-utilities"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636225 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="init"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636232 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="init"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636257 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636264 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: E1125 12:51:29.636279 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636286 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636511 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="19d77559-bdf5-466a-9189-0c1e894fac2c" containerName="registry-server"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636525 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e95eb38-5014-4433-a87c-c3b50881d25b" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.636546 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8edeb5-7208-4cd6-b861-03997e90e85c" containerName="dnsmasq-dns"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.638512 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.662099 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-87bnh"]
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.768576 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.769139 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp7th\" (UniqueName: \"kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.769339 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.871542 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.871603 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp7th\" (UniqueName: \"kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.871675 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.872217 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.872285 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh"
Nov 25 12:51:29 crc kubenswrapper[4675]: I1125
12:51:29.899732 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp7th\" (UniqueName: \"kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th\") pod \"community-operators-87bnh\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:29 crc kubenswrapper[4675]: I1125 12:51:29.964245 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:30 crc kubenswrapper[4675]: I1125 12:51:30.474532 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-87bnh"] Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.438721 4675 generic.go:334] "Generic (PLEG): container finished" podID="13f3fc9e-df33-4016-8d7e-a40112cdc27f" containerID="e80f7fc85b16aaaae8c6ac2c777c61784c0fbdc44b6ae4750a03b9f9fb69bae0" exitCode=0 Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.438793 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13f3fc9e-df33-4016-8d7e-a40112cdc27f","Type":"ContainerDied","Data":"e80f7fc85b16aaaae8c6ac2c777c61784c0fbdc44b6ae4750a03b9f9fb69bae0"} Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.460707 4675 generic.go:334] "Generic (PLEG): container finished" podID="aa5e2576-e3fb-44a7-83ad-6193b6437ae0" containerID="3cc0aca47dd2cbe8cbce82727299688b27797ec1beb6f52d7fac6729e55082fe" exitCode=0 Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.461178 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa5e2576-e3fb-44a7-83ad-6193b6437ae0","Type":"ContainerDied","Data":"3cc0aca47dd2cbe8cbce82727299688b27797ec1beb6f52d7fac6729e55082fe"} Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.481747 4675 generic.go:334] "Generic (PLEG): container finished" podID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerID="fa819cdff771ef34fc2eacead0f45cfe1c32359caa1b83586be38cef09365c21" exitCode=0 Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.481917 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerDied","Data":"fa819cdff771ef34fc2eacead0f45cfe1c32359caa1b83586be38cef09365c21"} Nov 25 12:51:31 crc kubenswrapper[4675]: I1125 12:51:31.481971 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerStarted","Data":"5b9cb61959e0b7ea64104bcd5b4ff34372ee6265118c6ff23e77267e140a3961"} Nov 25 12:51:32 crc kubenswrapper[4675]: I1125 12:51:32.500455 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13f3fc9e-df33-4016-8d7e-a40112cdc27f","Type":"ContainerStarted","Data":"4b2efa15e4601e0f09b3a50f0b3b54953ebc6ffda2f77f764350659eff6bf2ca"} Nov 25 12:51:32 crc kubenswrapper[4675]: I1125 12:51:32.505025 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:51:32 crc kubenswrapper[4675]: I1125 12:51:32.505300 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa5e2576-e3fb-44a7-83ad-6193b6437ae0","Type":"ContainerStarted","Data":"b7d29c7298d4483b0bc0ae5bad0aaeceb0d33a8aa9e4f64551d2ce59eb4ec2aa"} Nov 25 12:51:32 crc 
kubenswrapper[4675]: I1125 12:51:32.505572 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 12:51:32 crc kubenswrapper[4675]: I1125 12:51:32.596428 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.596411651 podStartE2EDuration="36.596411651s" podCreationTimestamp="2025-11-25 12:50:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:51:32.583767614 +0000 UTC m=+1437.755359955" watchObservedRunningTime="2025-11-25 12:51:32.596411651 +0000 UTC m=+1437.768003992" Nov 25 12:51:32 crc kubenswrapper[4675]: I1125 12:51:32.623328 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.623310027 podStartE2EDuration="37.623310027s" podCreationTimestamp="2025-11-25 12:50:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 12:51:32.61851446 +0000 UTC m=+1437.790106801" watchObservedRunningTime="2025-11-25 12:51:32.623310027 +0000 UTC m=+1437.794902368" Nov 25 12:51:33 crc kubenswrapper[4675]: I1125 12:51:33.517772 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerStarted","Data":"a6bda3fbdce5960a098682950e26e907e4cf045695e9b923198963168c62195c"} Nov 25 12:51:36 crc kubenswrapper[4675]: I1125 12:51:36.545791 4675 generic.go:334] "Generic (PLEG): container finished" podID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerID="a6bda3fbdce5960a098682950e26e907e4cf045695e9b923198963168c62195c" exitCode=0 Nov 25 12:51:36 crc kubenswrapper[4675]: I1125 12:51:36.545879 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerDied","Data":"a6bda3fbdce5960a098682950e26e907e4cf045695e9b923198963168c62195c"} Nov 25 12:51:38 crc kubenswrapper[4675]: I1125 12:51:38.566471 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerStarted","Data":"761b8eca681ff8ae22286960f7b61e51f5a7e3603370d148a17bbe722bade15d"} Nov 25 12:51:38 crc kubenswrapper[4675]: I1125 12:51:38.601766 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-87bnh" podStartSLOduration=2.996192726 podStartE2EDuration="9.601744791s" podCreationTimestamp="2025-11-25 12:51:29 +0000 UTC" firstStartedPulling="2025-11-25 12:51:31.483755403 +0000 UTC m=+1436.655347754" lastFinishedPulling="2025-11-25 12:51:38.089307478 +0000 UTC m=+1443.260899819" observedRunningTime="2025-11-25 12:51:38.58704518 +0000 UTC m=+1443.758637531" watchObservedRunningTime="2025-11-25 12:51:38.601744791 +0000 UTC m=+1443.773337142" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.893707 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z"] Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.896858 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.903613 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.903768 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.903911 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.904054 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.965305 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.965353 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.984167 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.984366 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h5gw\" (UniqueName: \"kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.984418 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:39 crc kubenswrapper[4675]: I1125 12:51:39.984473 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.091504 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h5gw\" (UniqueName: \"kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.091574 4675 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.091610 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.091688 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.099550 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.100147 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.111332 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.114726 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h5gw\" (UniqueName: \"kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.219324 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:51:40 crc kubenswrapper[4675]: I1125 12:51:40.277421 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z"] Nov 25 12:51:41 crc kubenswrapper[4675]: I1125 12:51:41.020539 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-87bnh" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="registry-server" probeResult="failure" output=< Nov 25 12:51:41 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 12:51:41 crc kubenswrapper[4675]: > Nov 25 12:51:41 crc kubenswrapper[4675]: I1125 12:51:41.637735 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z"] Nov 25 12:51:42 crc kubenswrapper[4675]: I1125 12:51:42.605853 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" event={"ID":"25753af6-5930-488b-8ffc-8b905d803063","Type":"ContainerStarted","Data":"9f12f23037c237c279f75988f029134bec451e07c3462fa1728d2b835774bb5a"} Nov 25 12:51:44 crc kubenswrapper[4675]: I1125 12:51:44.239548 4675 scope.go:117] "RemoveContainer" containerID="f423bf9cd908ab5cc69f6880c12a5abd2d642ec05fa5c43c53c12aeee09edcdd" Nov 25 12:51:44 crc kubenswrapper[4675]: I1125 12:51:44.291301 4675 scope.go:117] "RemoveContainer" containerID="8fe10d0557dc21343c75ea1187cf8ffc2c42c40efc72eae7f5243b2511de0468" Nov 25 12:51:46 crc kubenswrapper[4675]: I1125 12:51:46.314096 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="aa5e2576-e3fb-44a7-83ad-6193b6437ae0" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.208:5671: connect: connection refused" Nov 25 12:51:47 crc kubenswrapper[4675]: I1125 12:51:47.474349 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="13f3fc9e-df33-4016-8d7e-a40112cdc27f" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.209:5671: connect: connection refused" Nov 25 12:51:50 crc kubenswrapper[4675]: I1125 12:51:50.031602 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:50 crc kubenswrapper[4675]: I1125 12:51:50.105563 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:50 crc kubenswrapper[4675]: I1125 12:51:50.289652 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-87bnh"] Nov 25 12:51:51 crc kubenswrapper[4675]: I1125 12:51:51.958937 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-87bnh" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="registry-server" containerID="cri-o://761b8eca681ff8ae22286960f7b61e51f5a7e3603370d148a17bbe722bade15d" gracePeriod=2 Nov 25 12:51:52 crc kubenswrapper[4675]: I1125 12:51:52.978684 4675 generic.go:334] "Generic (PLEG): container finished" podID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerID="761b8eca681ff8ae22286960f7b61e51f5a7e3603370d148a17bbe722bade15d" exitCode=0 Nov 25 12:51:52 crc kubenswrapper[4675]: I1125 12:51:52.978962 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerDied","Data":"761b8eca681ff8ae22286960f7b61e51f5a7e3603370d148a17bbe722bade15d"} Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.306217 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.564193 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.647117 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities\") pod \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.647494 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bp7th\" (UniqueName: \"kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th\") pod \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.647693 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content\") pod \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\" (UID: \"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f\") " Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.649373 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities" (OuterVolumeSpecName: "utilities") pod "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" (UID: "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.654724 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th" (OuterVolumeSpecName: "kube-api-access-bp7th") pod "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" (UID: "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f"). InnerVolumeSpecName "kube-api-access-bp7th". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.709787 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" (UID: "dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.750027 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.750062 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 12:51:56 crc kubenswrapper[4675]: I1125 12:51:56.750088 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bp7th\" (UniqueName: \"kubernetes.io/projected/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f-kube-api-access-bp7th\") on node \"crc\" DevicePath \"\"" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.028440 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" event={"ID":"25753af6-5930-488b-8ffc-8b905d803063","Type":"ContainerStarted","Data":"0df2d9a774ed240440ddd50810c5217b1c71c052e8f696c2d863922677dd67a3"} Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.033202 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-87bnh" event={"ID":"dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f","Type":"ContainerDied","Data":"5b9cb61959e0b7ea64104bcd5b4ff34372ee6265118c6ff23e77267e140a3961"} Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.033261 4675 scope.go:117] "RemoveContainer" containerID="761b8eca681ff8ae22286960f7b61e51f5a7e3603370d148a17bbe722bade15d" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.033415 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-87bnh" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.067572 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" podStartSLOduration=3.5709485560000003 podStartE2EDuration="18.0675511s" podCreationTimestamp="2025-11-25 12:51:39 +0000 UTC" firstStartedPulling="2025-11-25 12:51:41.645937625 +0000 UTC m=+1446.817529966" lastFinishedPulling="2025-11-25 12:51:56.142540159 +0000 UTC m=+1461.314132510" observedRunningTime="2025-11-25 12:51:57.056598814 +0000 UTC m=+1462.228191155" watchObservedRunningTime="2025-11-25 12:51:57.0675511 +0000 UTC m=+1462.239143431" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.099015 4675 scope.go:117] "RemoveContainer" containerID="a6bda3fbdce5960a098682950e26e907e4cf045695e9b923198963168c62195c" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.102211 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-87bnh"] Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.132540 4675 scope.go:117] "RemoveContainer" containerID="fa819cdff771ef34fc2eacead0f45cfe1c32359caa1b83586be38cef09365c21" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.133857 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-87bnh"] Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.472973 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 12:51:57 crc kubenswrapper[4675]: I1125 12:51:57.556726 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" path="/var/lib/kubelet/pods/dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f/volumes" Nov 25 12:52:09 crc kubenswrapper[4675]: I1125 12:52:09.139180 4675 generic.go:334] "Generic (PLEG): container finished" podID="25753af6-5930-488b-8ffc-8b905d803063" containerID="0df2d9a774ed240440ddd50810c5217b1c71c052e8f696c2d863922677dd67a3" exitCode=0 Nov 25 12:52:09 crc kubenswrapper[4675]: I1125 12:52:09.139256 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" event={"ID":"25753af6-5930-488b-8ffc-8b905d803063","Type":"ContainerDied","Data":"0df2d9a774ed240440ddd50810c5217b1c71c052e8f696c2d863922677dd67a3"} Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.684134 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.830563 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h5gw\" (UniqueName: \"kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw\") pod \"25753af6-5930-488b-8ffc-8b905d803063\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.830711 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key\") pod \"25753af6-5930-488b-8ffc-8b905d803063\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.830792 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory\") pod \"25753af6-5930-488b-8ffc-8b905d803063\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.830847 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle\") pod \"25753af6-5930-488b-8ffc-8b905d803063\" (UID: \"25753af6-5930-488b-8ffc-8b905d803063\") " Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.837056 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw" (OuterVolumeSpecName: "kube-api-access-8h5gw") pod "25753af6-5930-488b-8ffc-8b905d803063" (UID: "25753af6-5930-488b-8ffc-8b905d803063"). InnerVolumeSpecName "kube-api-access-8h5gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.839097 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "25753af6-5930-488b-8ffc-8b905d803063" (UID: "25753af6-5930-488b-8ffc-8b905d803063"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.864266 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory" (OuterVolumeSpecName: "inventory") pod "25753af6-5930-488b-8ffc-8b905d803063" (UID: "25753af6-5930-488b-8ffc-8b905d803063"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.870369 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "25753af6-5930-488b-8ffc-8b905d803063" (UID: "25753af6-5930-488b-8ffc-8b905d803063"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.933317 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h5gw\" (UniqueName: \"kubernetes.io/projected/25753af6-5930-488b-8ffc-8b905d803063-kube-api-access-8h5gw\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.933457 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.933565 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:10 crc kubenswrapper[4675]: I1125 12:52:10.933658 4675 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25753af6-5930-488b-8ffc-8b905d803063-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.160123 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" event={"ID":"25753af6-5930-488b-8ffc-8b905d803063","Type":"ContainerDied","Data":"9f12f23037c237c279f75988f029134bec451e07c3462fa1728d2b835774bb5a"} Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.160167 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f12f23037c237c279f75988f029134bec451e07c3462fa1728d2b835774bb5a" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.160192 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.299433 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54"] Nov 25 12:52:11 crc kubenswrapper[4675]: E1125 12:52:11.299917 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="registry-server" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.299938 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="registry-server" Nov 25 12:52:11 crc kubenswrapper[4675]: E1125 12:52:11.299960 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="extract-utilities" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.299970 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="extract-utilities" Nov 25 12:52:11 crc kubenswrapper[4675]: E1125 12:52:11.300002 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="extract-content" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.300010 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="extract-content" Nov 25 12:52:11 crc kubenswrapper[4675]: E1125 12:52:11.300029 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25753af6-5930-488b-8ffc-8b905d803063" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.300039 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="25753af6-5930-488b-8ffc-8b905d803063" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.300324 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="25753af6-5930-488b-8ffc-8b905d803063" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.300383 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcd3dad5-ab33-4cdf-8dbf-8058fdcb137f" containerName="registry-server" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.301513 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.308878 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.309066 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.309542 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.310802 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54"] Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.315340 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.449044 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.449157 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h9xz\" (UniqueName: \"kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.449195 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.551155 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.551228 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h9xz\" (UniqueName: \"kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.551254 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.556405 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.556868 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.579923 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h9xz\" (UniqueName: \"kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-gcz54\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:11 crc kubenswrapper[4675]: I1125 12:52:11.665775 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:12 crc kubenswrapper[4675]: I1125 12:52:12.249035 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54"] Nov 25 12:52:13 crc kubenswrapper[4675]: I1125 12:52:13.185539 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" event={"ID":"876a0f8c-9396-49fe-b1b8-5c44e691a7c9","Type":"ContainerStarted","Data":"db23ae95a4591fbef95e852152d55b3ebb0b7274c3091fd3a9aeafea81c813cd"} Nov 25 12:52:13 crc kubenswrapper[4675]: I1125 12:52:13.185887 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" event={"ID":"876a0f8c-9396-49fe-b1b8-5c44e691a7c9","Type":"ContainerStarted","Data":"45ddf9087249398d00648c215cb3c7a4bfa445ded5e404e123b9a9c8c7d71b67"} Nov 25 12:52:13 crc kubenswrapper[4675]: I1125 12:52:13.208122 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" podStartSLOduration=1.7966479020000001 podStartE2EDuration="2.208105196s" podCreationTimestamp="2025-11-25 12:52:11 +0000 UTC" firstStartedPulling="2025-11-25 12:52:12.261713598 +0000 UTC m=+1477.433305939" lastFinishedPulling="2025-11-25 12:52:12.673170892 +0000 UTC m=+1477.844763233" observedRunningTime="2025-11-25 12:52:13.199278605 +0000 UTC m=+1478.370870966" watchObservedRunningTime="2025-11-25 12:52:13.208105196 +0000 UTC m=+1478.379697537" Nov 25 12:52:16 crc kubenswrapper[4675]: I1125 12:52:16.212535 4675 generic.go:334] "Generic (PLEG): container finished" podID="876a0f8c-9396-49fe-b1b8-5c44e691a7c9" containerID="db23ae95a4591fbef95e852152d55b3ebb0b7274c3091fd3a9aeafea81c813cd" exitCode=0 Nov 25 12:52:16 crc kubenswrapper[4675]: I1125 12:52:16.212635 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" 
event={"ID":"876a0f8c-9396-49fe-b1b8-5c44e691a7c9","Type":"ContainerDied","Data":"db23ae95a4591fbef95e852152d55b3ebb0b7274c3091fd3a9aeafea81c813cd"} Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.624920 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.788915 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6h9xz\" (UniqueName: \"kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz\") pod \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.789049 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory\") pod \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.789102 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key\") pod \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\" (UID: \"876a0f8c-9396-49fe-b1b8-5c44e691a7c9\") " Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.794139 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz" (OuterVolumeSpecName: "kube-api-access-6h9xz") pod "876a0f8c-9396-49fe-b1b8-5c44e691a7c9" (UID: "876a0f8c-9396-49fe-b1b8-5c44e691a7c9"). InnerVolumeSpecName "kube-api-access-6h9xz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.821971 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory" (OuterVolumeSpecName: "inventory") pod "876a0f8c-9396-49fe-b1b8-5c44e691a7c9" (UID: "876a0f8c-9396-49fe-b1b8-5c44e691a7c9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.822540 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "876a0f8c-9396-49fe-b1b8-5c44e691a7c9" (UID: "876a0f8c-9396-49fe-b1b8-5c44e691a7c9"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.891397 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.891428 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:17 crc kubenswrapper[4675]: I1125 12:52:17.891437 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6h9xz\" (UniqueName: \"kubernetes.io/projected/876a0f8c-9396-49fe-b1b8-5c44e691a7c9-kube-api-access-6h9xz\") on node \"crc\" DevicePath \"\"" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.234957 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" event={"ID":"876a0f8c-9396-49fe-b1b8-5c44e691a7c9","Type":"ContainerDied","Data":"45ddf9087249398d00648c215cb3c7a4bfa445ded5e404e123b9a9c8c7d71b67"} Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.235003 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45ddf9087249398d00648c215cb3c7a4bfa445ded5e404e123b9a9c8c7d71b67" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.235004 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-gcz54" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.318325 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f"] Nov 25 12:52:18 crc kubenswrapper[4675]: E1125 12:52:18.318793 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="876a0f8c-9396-49fe-b1b8-5c44e691a7c9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.319065 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="876a0f8c-9396-49fe-b1b8-5c44e691a7c9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.319316 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="876a0f8c-9396-49fe-b1b8-5c44e691a7c9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.320039 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.323647 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.323833 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.324149 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.324327 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.362269 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f"] Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.401670 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.402122 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.402164 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.402201 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4r5j\" (UniqueName: \"kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.504620 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4r5j\" (UniqueName: \"kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.504838 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.504910 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.504995 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.510024 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.510496 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.520558 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.522609 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4r5j\" (UniqueName: \"kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:18 crc kubenswrapper[4675]: I1125 12:52:18.651974 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:52:19 crc kubenswrapper[4675]: I1125 12:52:19.231319 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f"] Nov 25 12:52:19 crc kubenswrapper[4675]: I1125 12:52:19.246033 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" event={"ID":"3f838da8-c090-474d-826e-592b92857777","Type":"ContainerStarted","Data":"89fc6ac51b46de157ebdf11c080af4827db151064c52eaf49391d7fd7809974a"} Nov 25 12:52:20 crc kubenswrapper[4675]: I1125 12:52:20.256208 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" event={"ID":"3f838da8-c090-474d-826e-592b92857777","Type":"ContainerStarted","Data":"ba843a3c9ed67c74cb955c1a259b2fbc4d077d5f3248da75bb94ff3880a12f41"} Nov 25 12:52:20 crc kubenswrapper[4675]: I1125 12:52:20.275124 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" podStartSLOduration=1.828133826 podStartE2EDuration="2.275106161s" podCreationTimestamp="2025-11-25 12:52:18 +0000 UTC" firstStartedPulling="2025-11-25 12:52:19.237093321 +0000 UTC m=+1484.408685662" lastFinishedPulling="2025-11-25 12:52:19.684065656 +0000 UTC m=+1484.855657997" observedRunningTime="2025-11-25 12:52:20.271412927 +0000 UTC m=+1485.443005268" watchObservedRunningTime="2025-11-25 12:52:20.275106161 +0000 UTC m=+1485.446698502" Nov 25 12:52:43 crc kubenswrapper[4675]: I1125 12:52:43.663105 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:52:43 crc kubenswrapper[4675]: I1125 12:52:43.663626 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:52:44 crc kubenswrapper[4675]: I1125 12:52:44.592530 4675 scope.go:117] "RemoveContainer" containerID="57348e83ac2f41ec1acdd7c66c68f0633c636d052d1dc0604ad3155f4af05fda" Nov 25 12:53:13 crc kubenswrapper[4675]: I1125 12:53:13.662441 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:53:13 crc kubenswrapper[4675]: I1125 12:53:13.662929 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:53:43 crc kubenswrapper[4675]: I1125 12:53:43.662303 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 12:53:43 crc kubenswrapper[4675]: I1125 12:53:43.662857 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 12:53:43 crc kubenswrapper[4675]: I1125 12:53:43.662905 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 12:53:43 crc kubenswrapper[4675]: I1125 12:53:43.663637 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 12:53:43 crc kubenswrapper[4675]: I1125 12:53:43.663690 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" gracePeriod=600 Nov 25 12:53:44 crc kubenswrapper[4675]: I1125 12:53:44.047997 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" exitCode=0 Nov 25 12:53:44 crc kubenswrapper[4675]: I1125 12:53:44.048039 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61"} Nov 25 12:53:44 crc kubenswrapper[4675]: I1125 12:53:44.048071 4675 scope.go:117] "RemoveContainer" containerID="d19f1130a91b11d8bc294a8adf419715b599ba329818568ac60752a3ee96613c" Nov 25 12:53:44 crc kubenswrapper[4675]: E1125 12:53:44.427063 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:53:45 crc kubenswrapper[4675]: I1125 12:53:45.058593 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:53:45 crc kubenswrapper[4675]: E1125 12:53:45.059259 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:53:55 crc kubenswrapper[4675]: I1125 12:53:55.545381 4675 scope.go:117] 
"RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:53:55 crc kubenswrapper[4675]: E1125 12:53:55.546139 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:09 crc kubenswrapper[4675]: I1125 12:54:09.532415 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:54:09 crc kubenswrapper[4675]: E1125 12:54:09.533130 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:20 crc kubenswrapper[4675]: I1125 12:54:20.532636 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:54:20 crc kubenswrapper[4675]: E1125 12:54:20.533339 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:34 crc kubenswrapper[4675]: I1125 12:54:34.532903 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:54:34 crc kubenswrapper[4675]: E1125 12:54:34.533527 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:45 crc kubenswrapper[4675]: I1125 12:54:45.544732 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:54:45 crc kubenswrapper[4675]: E1125 12:54:45.545459 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:58 crc kubenswrapper[4675]: I1125 12:54:58.532932 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:54:58 crc kubenswrapper[4675]: E1125 12:54:58.533674 4675 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.066117 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-z5vkb"] Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.078335 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-z5vkb"] Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.090654 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-j4wk6"] Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.103088 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-j4wk6"] Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.543275 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54a0d51f-0bf4-40d2-891e-9b71f9014203" path="/var/lib/kubelet/pods/54a0d51f-0bf4-40d2-891e-9b71f9014203/volumes" Nov 25 12:54:59 crc kubenswrapper[4675]: I1125 12:54:59.545258 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2862e20-69d5-41fc-a821-bdffe2614102" path="/var/lib/kubelet/pods/d2862e20-69d5-41fc-a821-bdffe2614102/volumes" Nov 25 12:55:04 crc kubenswrapper[4675]: I1125 12:55:04.029052 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-7rn6x"] Nov 25 12:55:04 crc kubenswrapper[4675]: I1125 12:55:04.038999 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-7rn6x"] Nov 25 12:55:05 crc kubenswrapper[4675]: I1125 12:55:05.548014 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0" path="/var/lib/kubelet/pods/f772956e-b7a9-4cdc-a4c4-fe6d83f2a1e0/volumes" Nov 25 12:55:10 crc kubenswrapper[4675]: I1125 12:55:10.028180 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0d38-account-create-rbkxp"] Nov 25 12:55:10 crc kubenswrapper[4675]: I1125 12:55:10.035180 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-a7cb-account-create-jlj4x"] Nov 25 12:55:10 crc kubenswrapper[4675]: I1125 12:55:10.042394 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-a7cb-account-create-jlj4x"] Nov 25 12:55:10 crc kubenswrapper[4675]: I1125 12:55:10.051270 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0d38-account-create-rbkxp"] Nov 25 12:55:10 crc kubenswrapper[4675]: I1125 12:55:10.532493 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:55:10 crc kubenswrapper[4675]: E1125 12:55:10.533161 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:55:11 crc kubenswrapper[4675]: I1125 12:55:11.545614 4675 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="43901de0-d24f-4bee-aeaf-8c7c610b5312" path="/var/lib/kubelet/pods/43901de0-d24f-4bee-aeaf-8c7c610b5312/volumes" Nov 25 12:55:11 crc kubenswrapper[4675]: I1125 12:55:11.547151 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87535d76-93d8-4f78-bb2e-9c9a1ac266d3" path="/var/lib/kubelet/pods/87535d76-93d8-4f78-bb2e-9c9a1ac266d3/volumes" Nov 25 12:55:14 crc kubenswrapper[4675]: I1125 12:55:14.030618 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-4ef6-account-create-lvnrz"] Nov 25 12:55:14 crc kubenswrapper[4675]: I1125 12:55:14.039342 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-4ef6-account-create-lvnrz"] Nov 25 12:55:15 crc kubenswrapper[4675]: I1125 12:55:15.548487 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37d54c36-f165-4ca9-b1f6-95771e796399" path="/var/lib/kubelet/pods/37d54c36-f165-4ca9-b1f6-95771e796399/volumes" Nov 25 12:55:23 crc kubenswrapper[4675]: I1125 12:55:23.532801 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:55:23 crc kubenswrapper[4675]: E1125 12:55:23.534238 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:55:37 crc kubenswrapper[4675]: I1125 12:55:37.043333 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-vtdh7"] Nov 25 12:55:37 crc kubenswrapper[4675]: I1125 12:55:37.052591 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-vtdh7"] Nov 25 12:55:37 crc kubenswrapper[4675]: I1125 12:55:37.556204 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db9b274e-70fb-4e9d-bd32-28143b3b00f4" path="/var/lib/kubelet/pods/db9b274e-70fb-4e9d-bd32-28143b3b00f4/volumes" Nov 25 12:55:38 crc kubenswrapper[4675]: I1125 12:55:38.533453 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:55:38 crc kubenswrapper[4675]: E1125 12:55:38.534058 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.037061 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-7kq2m"] Nov 25 12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.048049 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-sl4fq"] Nov 25 12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.064143 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-7kq2m"] Nov 25 12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.074771 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-sl4fq"] Nov 25 
12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.547398 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3" path="/var/lib/kubelet/pods/094f1ec1-9a5c-44d4-8706-4a1fc9cb31a3/volumes" Nov 25 12:55:39 crc kubenswrapper[4675]: I1125 12:55:39.549871 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fb050e8-6d74-4c4e-aa11-a5e86f109fb6" path="/var/lib/kubelet/pods/3fb050e8-6d74-4c4e-aa11-a5e86f109fb6/volumes" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.031613 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-nqx9k"] Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.047908 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-nqx9k"] Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.174131 4675 generic.go:334] "Generic (PLEG): container finished" podID="3f838da8-c090-474d-826e-592b92857777" containerID="ba843a3c9ed67c74cb955c1a259b2fbc4d077d5f3248da75bb94ff3880a12f41" exitCode=0 Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.174172 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" event={"ID":"3f838da8-c090-474d-826e-592b92857777","Type":"ContainerDied","Data":"ba843a3c9ed67c74cb955c1a259b2fbc4d077d5f3248da75bb94ff3880a12f41"} Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.722898 4675 scope.go:117] "RemoveContainer" containerID="ce31f8ffd7789a4e06fba86090bc1ffd141dcc4554b9d256f8ec584572992a8f" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.749439 4675 scope.go:117] "RemoveContainer" containerID="abcc8f0f57f25da6f6e957db44e8f545d449d978d0a9e4931e9fbaf2b0aff404" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.804643 4675 scope.go:117] "RemoveContainer" containerID="ec5200d7880a086a872e91be8529cb051438d66d79987b8c7cf6a97bb92605ac" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.854330 4675 scope.go:117] "RemoveContainer" containerID="f8e444b9e9e360b1918bbc1887ae55d686dedc8d934eb9f76eb42466dbc93553" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.932176 4675 scope.go:117] "RemoveContainer" containerID="734f96fae17492b0510fbc953a6d266fbb25ca5f6ddb6394544e6f7701b3c7fb" Nov 25 12:55:44 crc kubenswrapper[4675]: I1125 12:55:44.963166 4675 scope.go:117] "RemoveContainer" containerID="d1161221510169542d73cd60466b25d57473e9204d86608e2caaa8f52232ecc5" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.001843 4675 scope.go:117] "RemoveContainer" containerID="853760c5e6c1422284de2885c97a93f75c559cf13f3fb457373aa0059258661c" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.025034 4675 scope.go:117] "RemoveContainer" containerID="e80326bdb7a154e1d2f55fcec5ebba9e87f9132f0604c27670106006c62cc497" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.043452 4675 scope.go:117] "RemoveContainer" containerID="da138338ad4f450445c9e8be5b6fa14cf8d93e93d05282c1254724b3dd7ce994" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.506788 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.542553 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5922e4e-e904-49f3-8661-d4cfb1dbebd2" path="/var/lib/kubelet/pods/a5922e4e-e904-49f3-8661-d4cfb1dbebd2/volumes" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.691686 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4r5j\" (UniqueName: \"kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j\") pod \"3f838da8-c090-474d-826e-592b92857777\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.691790 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key\") pod \"3f838da8-c090-474d-826e-592b92857777\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.691884 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle\") pod \"3f838da8-c090-474d-826e-592b92857777\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.691904 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory\") pod \"3f838da8-c090-474d-826e-592b92857777\" (UID: \"3f838da8-c090-474d-826e-592b92857777\") " Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.697084 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "3f838da8-c090-474d-826e-592b92857777" (UID: "3f838da8-c090-474d-826e-592b92857777"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.697653 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j" (OuterVolumeSpecName: "kube-api-access-k4r5j") pod "3f838da8-c090-474d-826e-592b92857777" (UID: "3f838da8-c090-474d-826e-592b92857777"). InnerVolumeSpecName "kube-api-access-k4r5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.717376 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory" (OuterVolumeSpecName: "inventory") pod "3f838da8-c090-474d-826e-592b92857777" (UID: "3f838da8-c090-474d-826e-592b92857777"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.719803 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3f838da8-c090-474d-826e-592b92857777" (UID: "3f838da8-c090-474d-826e-592b92857777"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.794211 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4r5j\" (UniqueName: \"kubernetes.io/projected/3f838da8-c090-474d-826e-592b92857777-kube-api-access-k4r5j\") on node \"crc\" DevicePath \"\"" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.794249 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.794272 4675 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 12:55:45 crc kubenswrapper[4675]: I1125 12:55:45.794285 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f838da8-c090-474d-826e-592b92857777-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.029951 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-2b6e-account-create-5pb28"] Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.040098 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-2b6e-account-create-5pb28"] Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.197231 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" event={"ID":"3f838da8-c090-474d-826e-592b92857777","Type":"ContainerDied","Data":"89fc6ac51b46de157ebdf11c080af4827db151064c52eaf49391d7fd7809974a"} Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.198222 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89fc6ac51b46de157ebdf11c080af4827db151064c52eaf49391d7fd7809974a" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.197334 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.279629 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8"] Nov 25 12:55:46 crc kubenswrapper[4675]: E1125 12:55:46.280016 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f838da8-c090-474d-826e-592b92857777" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.280032 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f838da8-c090-474d-826e-592b92857777" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.280224 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f838da8-c090-474d-826e-592b92857777" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.280776 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.286287 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.286409 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.286287 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.286970 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.292098 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8"] Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.312306 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.312527 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrxrg\" (UniqueName: \"kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.312687 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.414464 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrxrg\" (UniqueName: \"kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.414775 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.414975 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.422918 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.423226 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.431642 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrxrg\" (UniqueName: \"kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6stz8\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:46 crc kubenswrapper[4675]: I1125 12:55:46.597483 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:55:47 crc kubenswrapper[4675]: I1125 12:55:47.158365 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8"] Nov 25 12:55:47 crc kubenswrapper[4675]: W1125 12:55:47.163794 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25fb1275_2632_4271_b41e_909adabbdf27.slice/crio-11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc WatchSource:0}: Error finding container 11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc: Status 404 returned error can't find the container with id 11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc Nov 25 12:55:47 crc kubenswrapper[4675]: I1125 12:55:47.168034 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 12:55:47 crc kubenswrapper[4675]: I1125 12:55:47.214257 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" event={"ID":"25fb1275-2632-4271-b41e-909adabbdf27","Type":"ContainerStarted","Data":"11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc"} Nov 25 12:55:47 crc kubenswrapper[4675]: I1125 12:55:47.543753 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="398aafc4-3b57-4573-a72a-1b71b3a79383" path="/var/lib/kubelet/pods/398aafc4-3b57-4573-a72a-1b71b3a79383/volumes" Nov 25 12:55:48 crc kubenswrapper[4675]: I1125 12:55:48.223883 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" event={"ID":"25fb1275-2632-4271-b41e-909adabbdf27","Type":"ContainerStarted","Data":"0f745aebc4f48ddda152408120e517d8b61b0df449c781c18856beb93afb3be2"} Nov 25 12:55:48 crc 
kubenswrapper[4675]: I1125 12:55:48.245942 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" podStartSLOduration=1.648625961 podStartE2EDuration="2.245920115s" podCreationTimestamp="2025-11-25 12:55:46 +0000 UTC" firstStartedPulling="2025-11-25 12:55:47.167742683 +0000 UTC m=+1692.339335044" lastFinishedPulling="2025-11-25 12:55:47.765036857 +0000 UTC m=+1692.936629198" observedRunningTime="2025-11-25 12:55:48.242069801 +0000 UTC m=+1693.413662152" watchObservedRunningTime="2025-11-25 12:55:48.245920115 +0000 UTC m=+1693.417512466" Nov 25 12:55:51 crc kubenswrapper[4675]: I1125 12:55:51.036609 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-pbf54"] Nov 25 12:55:51 crc kubenswrapper[4675]: I1125 12:55:51.044778 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-pbf54"] Nov 25 12:55:51 crc kubenswrapper[4675]: I1125 12:55:51.533073 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:55:51 crc kubenswrapper[4675]: E1125 12:55:51.533423 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:55:51 crc kubenswrapper[4675]: I1125 12:55:51.544530 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d872b841-9c08-4120-880d-1d2803c8e3bd" path="/var/lib/kubelet/pods/d872b841-9c08-4120-880d-1d2803c8e3bd/volumes" Nov 25 12:55:58 crc kubenswrapper[4675]: I1125 12:55:58.050587 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-7322-account-create-hwqkt"] Nov 25 12:55:58 crc kubenswrapper[4675]: I1125 12:55:58.061616 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-7322-account-create-hwqkt"] Nov 25 12:55:59 crc kubenswrapper[4675]: I1125 12:55:59.030328 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-78c5-account-create-64jfv"] Nov 25 12:55:59 crc kubenswrapper[4675]: I1125 12:55:59.038620 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-78c5-account-create-64jfv"] Nov 25 12:55:59 crc kubenswrapper[4675]: I1125 12:55:59.546984 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ac2c87f-984d-4561-a825-b3b25be4e078" path="/var/lib/kubelet/pods/2ac2c87f-984d-4561-a825-b3b25be4e078/volumes" Nov 25 12:55:59 crc kubenswrapper[4675]: I1125 12:55:59.547989 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49a0347f-7b9b-461f-a5d7-1c803263ba15" path="/var/lib/kubelet/pods/49a0347f-7b9b-461f-a5d7-1c803263ba15/volumes" Nov 25 12:56:04 crc kubenswrapper[4675]: I1125 12:56:04.532083 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:56:04 crc kubenswrapper[4675]: E1125 12:56:04.532786 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:56:19 crc kubenswrapper[4675]: I1125 12:56:19.532643 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:56:19 crc kubenswrapper[4675]: E1125 12:56:19.533398 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:56:31 crc kubenswrapper[4675]: I1125 12:56:31.532843 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:56:31 crc kubenswrapper[4675]: E1125 12:56:31.533673 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:56:36 crc kubenswrapper[4675]: I1125 12:56:36.039867 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ld5wn"] Nov 25 12:56:36 crc kubenswrapper[4675]: I1125 12:56:36.048992 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ld5wn"] Nov 25 12:56:37 crc kubenswrapper[4675]: I1125 12:56:37.544339 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eba420bb-9044-4e38-bcd8-11e51c903cac" path="/var/lib/kubelet/pods/eba420bb-9044-4e38-bcd8-11e51c903cac/volumes" Nov 25 12:56:43 crc kubenswrapper[4675]: I1125 12:56:43.044757 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-slqsb"] Nov 25 12:56:43 crc kubenswrapper[4675]: I1125 12:56:43.055559 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-slqsb"] Nov 25 12:56:43 crc kubenswrapper[4675]: I1125 12:56:43.548273 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ddf9f0c-7045-4afe-945e-6c6f04f3e699" path="/var/lib/kubelet/pods/3ddf9f0c-7045-4afe-945e-6c6f04f3e699/volumes" Nov 25 12:56:44 crc kubenswrapper[4675]: I1125 12:56:44.532764 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:56:44 crc kubenswrapper[4675]: E1125 12:56:44.533018 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.240506 4675 scope.go:117] "RemoveContainer" containerID="d626183bde11ee3ed62ca10ecae3dc77a88bd27c1c257686dc9e439937c521fd" 
Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.275926 4675 scope.go:117] "RemoveContainer" containerID="c50b4a4f865e84d2378b4dc7db967c21116952450827df54eb31dbaf87ce67e3" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.323430 4675 scope.go:117] "RemoveContainer" containerID="aba56ca1748ab9c460cc6b34d2d76f9c5f1cb56802ac688e8e0672d2a9b6ccbc" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.360708 4675 scope.go:117] "RemoveContainer" containerID="c2500178a20e65a654904b8164b1d955d20d4445d51534a1ce4d5d7f655c58ac" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.397579 4675 scope.go:117] "RemoveContainer" containerID="29fb2159f2df74be823280e69570f59540cbf4b8e04474c75160d506ee7fdf2a" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.446912 4675 scope.go:117] "RemoveContainer" containerID="f631c7b18a8798e0707d7b486d9edfa3dfa03ecc8cabfa8b440bd0877518ac1f" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.492057 4675 scope.go:117] "RemoveContainer" containerID="5616dc35c507a1d4da0cb3cfce369d6221b6fbf4c34065e4717fc0016d763fae" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.512067 4675 scope.go:117] "RemoveContainer" containerID="51ba83037e6556e175eacbf5a835e0dbab44b348701b90877519b4d7c93501c3" Nov 25 12:56:45 crc kubenswrapper[4675]: I1125 12:56:45.536790 4675 scope.go:117] "RemoveContainer" containerID="300473e1a8157ec44f3b32ab48854dc2894458c85303c1418f17bfaee60d6bdd" Nov 25 12:56:54 crc kubenswrapper[4675]: I1125 12:56:54.040558 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-mj5tw"] Nov 25 12:56:54 crc kubenswrapper[4675]: I1125 12:56:54.050746 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-mj5tw"] Nov 25 12:56:55 crc kubenswrapper[4675]: I1125 12:56:55.550355 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92b6601f-6d94-4208-843b-a0fe1aac75ed" path="/var/lib/kubelet/pods/92b6601f-6d94-4208-843b-a0fe1aac75ed/volumes" Nov 25 12:56:57 crc kubenswrapper[4675]: I1125 12:56:57.532991 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:56:57 crc kubenswrapper[4675]: E1125 12:56:57.533616 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:57:01 crc kubenswrapper[4675]: I1125 12:57:01.027861 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-rxzpl"] Nov 25 12:57:01 crc kubenswrapper[4675]: I1125 12:57:01.038369 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-rxzpl"] Nov 25 12:57:01 crc kubenswrapper[4675]: I1125 12:57:01.549469 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ee67608-bfaa-407c-9256-488729244fe0" path="/var/lib/kubelet/pods/8ee67608-bfaa-407c-9256-488729244fe0/volumes" Nov 25 12:57:08 crc kubenswrapper[4675]: I1125 12:57:08.533300 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:57:08 crc kubenswrapper[4675]: E1125 12:57:08.535173 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:57:20 crc kubenswrapper[4675]: I1125 12:57:20.532059 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:57:20 crc kubenswrapper[4675]: E1125 12:57:20.533024 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:57:21 crc kubenswrapper[4675]: I1125 12:57:21.032335 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rhqss"] Nov 25 12:57:21 crc kubenswrapper[4675]: I1125 12:57:21.040651 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rhqss"] Nov 25 12:57:21 crc kubenswrapper[4675]: I1125 12:57:21.621696 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c05847eb-7376-4c25-96e6-9218fa514493" path="/var/lib/kubelet/pods/c05847eb-7376-4c25-96e6-9218fa514493/volumes" Nov 25 12:57:33 crc kubenswrapper[4675]: I1125 12:57:33.534352 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:57:33 crc kubenswrapper[4675]: E1125 12:57:33.535033 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:57:44 crc kubenswrapper[4675]: I1125 12:57:44.217982 4675 generic.go:334] "Generic (PLEG): container finished" podID="25fb1275-2632-4271-b41e-909adabbdf27" containerID="0f745aebc4f48ddda152408120e517d8b61b0df449c781c18856beb93afb3be2" exitCode=0 Nov 25 12:57:44 crc kubenswrapper[4675]: I1125 12:57:44.218078 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" event={"ID":"25fb1275-2632-4271-b41e-909adabbdf27","Type":"ContainerDied","Data":"0f745aebc4f48ddda152408120e517d8b61b0df449c781c18856beb93afb3be2"} Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.658272 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.722996 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") pod \"25fb1275-2632-4271-b41e-909adabbdf27\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.723111 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key\") pod \"25fb1275-2632-4271-b41e-909adabbdf27\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.723287 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrxrg\" (UniqueName: \"kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg\") pod \"25fb1275-2632-4271-b41e-909adabbdf27\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.730096 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg" (OuterVolumeSpecName: "kube-api-access-jrxrg") pod "25fb1275-2632-4271-b41e-909adabbdf27" (UID: "25fb1275-2632-4271-b41e-909adabbdf27"). InnerVolumeSpecName "kube-api-access-jrxrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.742036 4675 scope.go:117] "RemoveContainer" containerID="1dc687f07b67ce27514732f4d864510d41ed685d9cd44255eb7b27574caccc55" Nov 25 12:57:45 crc kubenswrapper[4675]: E1125 12:57:45.750716 4675 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory podName:25fb1275-2632-4271-b41e-909adabbdf27 nodeName:}" failed. No retries permitted until 2025-11-25 12:57:46.250682079 +0000 UTC m=+1811.422274420 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory") pod "25fb1275-2632-4271-b41e-909adabbdf27" (UID: "25fb1275-2632-4271-b41e-909adabbdf27") : error deleting /var/lib/kubelet/pods/25fb1275-2632-4271-b41e-909adabbdf27/volume-subpaths: remove /var/lib/kubelet/pods/25fb1275-2632-4271-b41e-909adabbdf27/volume-subpaths: no such file or directory Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.754748 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "25fb1275-2632-4271-b41e-909adabbdf27" (UID: "25fb1275-2632-4271-b41e-909adabbdf27"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.825592 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrxrg\" (UniqueName: \"kubernetes.io/projected/25fb1275-2632-4271-b41e-909adabbdf27-kube-api-access-jrxrg\") on node \"crc\" DevicePath \"\"" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.825816 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.851488 4675 scope.go:117] "RemoveContainer" containerID="0b663ec7c4f5e1fe8005bb05b1f4b514f633f609b2e481aa9cc021138088547d" Nov 25 12:57:45 crc kubenswrapper[4675]: I1125 12:57:45.904953 4675 scope.go:117] "RemoveContainer" containerID="d22778495631214cfd4c1334c0b28ef3bb60257cff4de3ee737bd9542c4e93e8" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.239519 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" event={"ID":"25fb1275-2632-4271-b41e-909adabbdf27","Type":"ContainerDied","Data":"11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc"} Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.239564 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11d42e80a4851045845fe4efee29acd322a599863605b1f32d7cefc4a5b1fefc" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.239567 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6stz8" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.316508 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb"] Nov 25 12:57:46 crc kubenswrapper[4675]: E1125 12:57:46.317017 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25fb1275-2632-4271-b41e-909adabbdf27" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.317039 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="25fb1275-2632-4271-b41e-909adabbdf27" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.317232 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="25fb1275-2632-4271-b41e-909adabbdf27" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.318072 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.328349 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb"] Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.336387 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") pod \"25fb1275-2632-4271-b41e-909adabbdf27\" (UID: \"25fb1275-2632-4271-b41e-909adabbdf27\") " Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.354348 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory" (OuterVolumeSpecName: "inventory") pod "25fb1275-2632-4271-b41e-909adabbdf27" (UID: "25fb1275-2632-4271-b41e-909adabbdf27"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.438109 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8c86\" (UniqueName: \"kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.438212 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.438277 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.438430 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25fb1275-2632-4271-b41e-909adabbdf27-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.540259 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.540356 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.540609 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8c86\" (UniqueName: \"kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.549132 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.556348 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.562503 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8c86\" (UniqueName: \"kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:46 crc kubenswrapper[4675]: I1125 12:57:46.656117 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:57:47 crc kubenswrapper[4675]: I1125 12:57:47.200374 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb"] Nov 25 12:57:47 crc kubenswrapper[4675]: I1125 12:57:47.250438 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" event={"ID":"17c7ad50-1a29-478e-b1df-a0084c3142df","Type":"ContainerStarted","Data":"05519e97dc07e7780bf5d52276555e22928aeaee16e0558b09f62db3f6fe0460"} Nov 25 12:57:48 crc kubenswrapper[4675]: I1125 12:57:48.261666 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" event={"ID":"17c7ad50-1a29-478e-b1df-a0084c3142df","Type":"ContainerStarted","Data":"86e8f1e5e0c4a57de71b3ebeb9d2e7537d007ead02c4809efcaaedba4a189861"} Nov 25 12:57:48 crc kubenswrapper[4675]: I1125 12:57:48.283613 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" podStartSLOduration=1.823638519 podStartE2EDuration="2.283589904s" podCreationTimestamp="2025-11-25 12:57:46 +0000 UTC" firstStartedPulling="2025-11-25 12:57:47.206157246 +0000 UTC m=+1812.377749587" lastFinishedPulling="2025-11-25 12:57:47.666108631 +0000 UTC m=+1812.837700972" observedRunningTime="2025-11-25 12:57:48.280015718 +0000 UTC m=+1813.451608069" watchObservedRunningTime="2025-11-25 12:57:48.283589904 +0000 UTC m=+1813.455182255" Nov 25 12:57:48 crc kubenswrapper[4675]: I1125 12:57:48.533065 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:57:48 crc kubenswrapper[4675]: E1125 12:57:48.533393 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:57:59 crc kubenswrapper[4675]: I1125 12:57:59.534024 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:57:59 crc kubenswrapper[4675]: E1125 12:57:59.534917 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.044751 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-q7g4j"] Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.056696 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-8v95j"] Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.067476 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-5646n"] Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.074486 4675 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-5646n"] Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.081830 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-q7g4j"] Nov 25 12:58:10 crc kubenswrapper[4675]: I1125 12:58:10.088837 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-8v95j"] Nov 25 12:58:11 crc kubenswrapper[4675]: I1125 12:58:11.533524 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:58:11 crc kubenswrapper[4675]: E1125 12:58:11.534379 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:58:11 crc kubenswrapper[4675]: I1125 12:58:11.555538 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06abd7e9-ef14-4166-9897-27f51471ee36" path="/var/lib/kubelet/pods/06abd7e9-ef14-4166-9897-27f51471ee36/volumes" Nov 25 12:58:11 crc kubenswrapper[4675]: I1125 12:58:11.556653 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f517bd1-b71d-4bd5-8570-dda1efd3e4ce" path="/var/lib/kubelet/pods/4f517bd1-b71d-4bd5-8570-dda1efd3e4ce/volumes" Nov 25 12:58:11 crc kubenswrapper[4675]: I1125 12:58:11.557499 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d11cfa02-10db-4ea2-a713-bd7b18d1c65d" path="/var/lib/kubelet/pods/d11cfa02-10db-4ea2-a713-bd7b18d1c65d/volumes" Nov 25 12:58:20 crc kubenswrapper[4675]: I1125 12:58:20.034188 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-46b5-account-create-lxxth"] Nov 25 12:58:20 crc kubenswrapper[4675]: I1125 12:58:20.046788 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-46b5-account-create-lxxth"] Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.026717 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e721-account-create-hv52z"] Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.041941 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-79eb-account-create-8tls9"] Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.060210 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-e721-account-create-hv52z"] Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.072016 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-79eb-account-create-8tls9"] Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.544305 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e" path="/var/lib/kubelet/pods/9c8b4ba0-95b9-48b2-b62d-98bf477fdf8e/volumes" Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.545427 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7dc2e46-7016-47c0-8074-9a5aa01dbdb5" path="/var/lib/kubelet/pods/b7dc2e46-7016-47c0-8074-9a5aa01dbdb5/volumes" Nov 25 12:58:21 crc kubenswrapper[4675]: I1125 12:58:21.546168 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf34c61d-c354-49a2-9956-3e3f0c09f6c3" 
path="/var/lib/kubelet/pods/bf34c61d-c354-49a2-9956-3e3f0c09f6c3/volumes" Nov 25 12:58:25 crc kubenswrapper[4675]: I1125 12:58:25.543300 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:58:25 crc kubenswrapper[4675]: E1125 12:58:25.544374 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:58:37 crc kubenswrapper[4675]: I1125 12:58:37.532628 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:58:37 crc kubenswrapper[4675]: E1125 12:58:37.533472 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 12:58:45 crc kubenswrapper[4675]: I1125 12:58:45.989161 4675 scope.go:117] "RemoveContainer" containerID="3d2f5d4f805c6b32548d3269cb8b5d27a9b6ffe117487ebfa3d76979ab7ac3d0" Nov 25 12:58:46 crc kubenswrapper[4675]: I1125 12:58:46.014216 4675 scope.go:117] "RemoveContainer" containerID="0af2c2716a15891566813fd756a3847715e200356572cc057335c5ab988fbf4f" Nov 25 12:58:46 crc kubenswrapper[4675]: I1125 12:58:46.062353 4675 scope.go:117] "RemoveContainer" containerID="beb4334708cf53ccd644d9475bfcefa3412a4b1b6d74b22ef01aaa3af33f9f18" Nov 25 12:58:46 crc kubenswrapper[4675]: I1125 12:58:46.111270 4675 scope.go:117] "RemoveContainer" containerID="a390ef7b9cc37457e5c549631b8fb2d772a9e241e9a82d94c1cd4b7526da8b9c" Nov 25 12:58:46 crc kubenswrapper[4675]: I1125 12:58:46.171405 4675 scope.go:117] "RemoveContainer" containerID="87b8d2e878db95a227a55dacefb33f9defc9bf9a7547455a7b2b9e6a5b1cf07e" Nov 25 12:58:46 crc kubenswrapper[4675]: I1125 12:58:46.215760 4675 scope.go:117] "RemoveContainer" containerID="f5b712a84cc78eea396104e526aca481eabc08f8d1dcc5bd70d1d4daad441783" Nov 25 12:58:50 crc kubenswrapper[4675]: I1125 12:58:50.532440 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 12:58:50 crc kubenswrapper[4675]: I1125 12:58:50.780611 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76"} Nov 25 12:59:00 crc kubenswrapper[4675]: I1125 12:59:00.037008 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr22c"] Nov 25 12:59:00 crc kubenswrapper[4675]: I1125 12:59:00.045104 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr22c"] Nov 25 12:59:01 crc kubenswrapper[4675]: I1125 12:59:01.543279 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0babe492-76ca-4609-b35f-e1d613d06078" 
path="/var/lib/kubelet/pods/0babe492-76ca-4609-b35f-e1d613d06078/volumes" Nov 25 12:59:07 crc kubenswrapper[4675]: I1125 12:59:07.941238 4675 generic.go:334] "Generic (PLEG): container finished" podID="17c7ad50-1a29-478e-b1df-a0084c3142df" containerID="86e8f1e5e0c4a57de71b3ebeb9d2e7537d007ead02c4809efcaaedba4a189861" exitCode=0 Nov 25 12:59:07 crc kubenswrapper[4675]: I1125 12:59:07.941288 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" event={"ID":"17c7ad50-1a29-478e-b1df-a0084c3142df","Type":"ContainerDied","Data":"86e8f1e5e0c4a57de71b3ebeb9d2e7537d007ead02c4809efcaaedba4a189861"} Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.532082 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.634642 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory\") pod \"17c7ad50-1a29-478e-b1df-a0084c3142df\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.634772 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8c86\" (UniqueName: \"kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86\") pod \"17c7ad50-1a29-478e-b1df-a0084c3142df\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.634936 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key\") pod \"17c7ad50-1a29-478e-b1df-a0084c3142df\" (UID: \"17c7ad50-1a29-478e-b1df-a0084c3142df\") " Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.641260 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86" (OuterVolumeSpecName: "kube-api-access-f8c86") pod "17c7ad50-1a29-478e-b1df-a0084c3142df" (UID: "17c7ad50-1a29-478e-b1df-a0084c3142df"). InnerVolumeSpecName "kube-api-access-f8c86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.666677 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "17c7ad50-1a29-478e-b1df-a0084c3142df" (UID: "17c7ad50-1a29-478e-b1df-a0084c3142df"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.668944 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory" (OuterVolumeSpecName: "inventory") pod "17c7ad50-1a29-478e-b1df-a0084c3142df" (UID: "17c7ad50-1a29-478e-b1df-a0084c3142df"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.736808 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.736863 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8c86\" (UniqueName: \"kubernetes.io/projected/17c7ad50-1a29-478e-b1df-a0084c3142df-kube-api-access-f8c86\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.736877 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c7ad50-1a29-478e-b1df-a0084c3142df-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.962007 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" event={"ID":"17c7ad50-1a29-478e-b1df-a0084c3142df","Type":"ContainerDied","Data":"05519e97dc07e7780bf5d52276555e22928aeaee16e0558b09f62db3f6fe0460"} Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.962419 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05519e97dc07e7780bf5d52276555e22928aeaee16e0558b09f62db3f6fe0460" Nov 25 12:59:09 crc kubenswrapper[4675]: I1125 12:59:09.962056 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.059237 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9"] Nov 25 12:59:10 crc kubenswrapper[4675]: E1125 12:59:10.059699 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17c7ad50-1a29-478e-b1df-a0084c3142df" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.059722 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="17c7ad50-1a29-478e-b1df-a0084c3142df" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.060024 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="17c7ad50-1a29-478e-b1df-a0084c3142df" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.060858 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.063889 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.064136 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.064342 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.064459 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.070609 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9"] Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.246669 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.246897 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.246988 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgbg5\" (UniqueName: \"kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.349167 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.349614 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.349860 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgbg5\" (UniqueName: \"kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.355998 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.362446 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.373348 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgbg5\" (UniqueName: \"kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.382771 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.910416 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9"] Nov 25 12:59:10 crc kubenswrapper[4675]: I1125 12:59:10.974011 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" event={"ID":"92805c79-2eb0-4562-9aed-1a7c7b88a5aa","Type":"ContainerStarted","Data":"decd1efbcc0253dc3d605bb5533d7cd822240f36b94b378667aa5c09de93b085"} Nov 25 12:59:12 crc kubenswrapper[4675]: I1125 12:59:12.992225 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" event={"ID":"92805c79-2eb0-4562-9aed-1a7c7b88a5aa","Type":"ContainerStarted","Data":"b4ef97fe6034afdd35cef19eaf2615eced2e51eb3f868f7de165f885d7307b88"} Nov 25 12:59:13 crc kubenswrapper[4675]: I1125 12:59:13.020570 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" podStartSLOduration=1.843414137 podStartE2EDuration="3.02054809s" podCreationTimestamp="2025-11-25 12:59:10 +0000 UTC" firstStartedPulling="2025-11-25 12:59:10.922455542 +0000 UTC m=+1896.094047883" lastFinishedPulling="2025-11-25 12:59:12.099589495 +0000 UTC m=+1897.271181836" observedRunningTime="2025-11-25 12:59:13.009050048 +0000 UTC m=+1898.180642449" watchObservedRunningTime="2025-11-25 12:59:13.02054809 +0000 UTC m=+1898.192140461" Nov 25 12:59:18 crc kubenswrapper[4675]: I1125 12:59:18.033159 4675 generic.go:334] "Generic (PLEG): container finished" podID="92805c79-2eb0-4562-9aed-1a7c7b88a5aa" containerID="b4ef97fe6034afdd35cef19eaf2615eced2e51eb3f868f7de165f885d7307b88" exitCode=0 Nov 25 12:59:18 crc kubenswrapper[4675]: I1125 
Nov 25 12:59:18 crc kubenswrapper[4675]: I1125 12:59:18.033159 4675 generic.go:334] "Generic (PLEG): container finished" podID="92805c79-2eb0-4562-9aed-1a7c7b88a5aa" containerID="b4ef97fe6034afdd35cef19eaf2615eced2e51eb3f868f7de165f885d7307b88" exitCode=0
Nov 25 12:59:18 crc kubenswrapper[4675]: I1125 12:59:18.033288 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" event={"ID":"92805c79-2eb0-4562-9aed-1a7c7b88a5aa","Type":"ContainerDied","Data":"b4ef97fe6034afdd35cef19eaf2615eced2e51eb3f868f7de165f885d7307b88"}
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.511892 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9"
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.631607 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key\") pod \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") "
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.631674 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgbg5\" (UniqueName: \"kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5\") pod \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") "
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.631888 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory\") pod \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\" (UID: \"92805c79-2eb0-4562-9aed-1a7c7b88a5aa\") "
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.637370 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5" (OuterVolumeSpecName: "kube-api-access-zgbg5") pod "92805c79-2eb0-4562-9aed-1a7c7b88a5aa" (UID: "92805c79-2eb0-4562-9aed-1a7c7b88a5aa"). InnerVolumeSpecName "kube-api-access-zgbg5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.669527 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "92805c79-2eb0-4562-9aed-1a7c7b88a5aa" (UID: "92805c79-2eb0-4562-9aed-1a7c7b88a5aa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.674961 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory" (OuterVolumeSpecName: "inventory") pod "92805c79-2eb0-4562-9aed-1a7c7b88a5aa" (UID: "92805c79-2eb0-4562-9aed-1a7c7b88a5aa"). InnerVolumeSpecName "inventory".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.734803 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.734858 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgbg5\" (UniqueName: \"kubernetes.io/projected/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-kube-api-access-zgbg5\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:19 crc kubenswrapper[4675]: I1125 12:59:19.734871 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92805c79-2eb0-4562-9aed-1a7c7b88a5aa-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.052279 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.053042 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9" event={"ID":"92805c79-2eb0-4562-9aed-1a7c7b88a5aa","Type":"ContainerDied","Data":"decd1efbcc0253dc3d605bb5533d7cd822240f36b94b378667aa5c09de93b085"} Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.053160 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="decd1efbcc0253dc3d605bb5533d7cd822240f36b94b378667aa5c09de93b085" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.147190 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd"] Nov 25 12:59:20 crc kubenswrapper[4675]: E1125 12:59:20.147647 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92805c79-2eb0-4562-9aed-1a7c7b88a5aa" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.147671 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="92805c79-2eb0-4562-9aed-1a7c7b88a5aa" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.147978 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="92805c79-2eb0-4562-9aed-1a7c7b88a5aa" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.153845 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.157233 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.157398 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.161806 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.162215 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.178672 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd"] Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.243361 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4vwr\" (UniqueName: \"kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.243536 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.243570 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.359061 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.359162 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.359356 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4vwr\" (UniqueName: \"kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: 
\"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.370875 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.371369 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.379856 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4vwr\" (UniqueName: \"kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vgqfd\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:20 crc kubenswrapper[4675]: I1125 12:59:20.472983 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 12:59:21 crc kubenswrapper[4675]: I1125 12:59:21.020758 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd"] Nov 25 12:59:21 crc kubenswrapper[4675]: I1125 12:59:21.061948 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" event={"ID":"21e88661-854c-481d-b024-c7c87ea9373a","Type":"ContainerStarted","Data":"34c8496165b3dd88656dfd232d06c405b806e84e38cec6eab7426cbdd1fb5a70"} Nov 25 12:59:22 crc kubenswrapper[4675]: I1125 12:59:22.080717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" event={"ID":"21e88661-854c-481d-b024-c7c87ea9373a","Type":"ContainerStarted","Data":"94a42985894c0f7b7ebedd99c23544767129082f5a16f1c4c556175dfc2854a8"} Nov 25 12:59:22 crc kubenswrapper[4675]: I1125 12:59:22.124907 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" podStartSLOduration=1.718481049 podStartE2EDuration="2.124886737s" podCreationTimestamp="2025-11-25 12:59:20 +0000 UTC" firstStartedPulling="2025-11-25 12:59:21.024852469 +0000 UTC m=+1906.196444820" lastFinishedPulling="2025-11-25 12:59:21.431258167 +0000 UTC m=+1906.602850508" observedRunningTime="2025-11-25 12:59:22.11820042 +0000 UTC m=+1907.289792761" watchObservedRunningTime="2025-11-25 12:59:22.124886737 +0000 UTC m=+1907.296479078" Nov 25 12:59:25 crc kubenswrapper[4675]: I1125 12:59:25.045688 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-mlk8r"] Nov 25 12:59:25 crc kubenswrapper[4675]: I1125 12:59:25.055650 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-mlk8r"] Nov 25 12:59:25 crc kubenswrapper[4675]: I1125 12:59:25.576286 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="ab7f6527-92c4-48fb-b187-bb9dc5e6aac2" path="/var/lib/kubelet/pods/ab7f6527-92c4-48fb-b187-bb9dc5e6aac2/volumes" Nov 25 12:59:26 crc kubenswrapper[4675]: I1125 12:59:26.027130 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4fs77"] Nov 25 12:59:26 crc kubenswrapper[4675]: I1125 12:59:26.033616 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4fs77"] Nov 25 12:59:27 crc kubenswrapper[4675]: I1125 12:59:27.544528 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e0ca7d-c361-435c-90ac-29ff3e601751" path="/var/lib/kubelet/pods/06e0ca7d-c361-435c-90ac-29ff3e601751/volumes" Nov 25 12:59:46 crc kubenswrapper[4675]: I1125 12:59:46.369346 4675 scope.go:117] "RemoveContainer" containerID="67fd9d608c15ce7038ac66ff945d10e638b4dfb0ba706ed14583307038b4ac9d" Nov 25 12:59:46 crc kubenswrapper[4675]: I1125 12:59:46.424706 4675 scope.go:117] "RemoveContainer" containerID="a91bba163fe4f08f11d7cd7c7301ac2cde5f3ac384d3b5e3ec5968003e5069c1" Nov 25 12:59:46 crc kubenswrapper[4675]: I1125 12:59:46.472295 4675 scope.go:117] "RemoveContainer" containerID="943d5bedc67a845ec3273139aa12561bb2c11c86b87d0f868b4a93a9b954049c" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.171230 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr"] Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.172891 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.175445 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.175671 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.183293 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr"] Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.310381 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7529\" (UniqueName: \"kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.311081 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.311185 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.413143 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7529\" (UniqueName: \"kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.413265 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.413306 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.414650 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.431291 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.433596 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7529\" (UniqueName: \"kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529\") pod \"collect-profiles-29401260-zj7kr\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.492868 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" Nov 25 13:00:00 crc kubenswrapper[4675]: I1125 13:00:00.951316 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr"] Nov 25 13:00:01 crc kubenswrapper[4675]: I1125 13:00:01.414969 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" event={"ID":"1e881be8-6492-4fca-9689-e7d63c9336e5","Type":"ContainerStarted","Data":"5552c6d07ffdd5d0cc63eb36a3f3e6b3072e4122b0630d53da919f8b5fd18248"} Nov 25 13:00:01 crc kubenswrapper[4675]: I1125 13:00:01.415331 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" event={"ID":"1e881be8-6492-4fca-9689-e7d63c9336e5","Type":"ContainerStarted","Data":"7f55e2db8e4beafdaffb230fdbaf02d77e51fd5bd157d4d14b300beb9bd09a77"} Nov 25 13:00:01 crc kubenswrapper[4675]: I1125 13:00:01.447908 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" podStartSLOduration=1.447889883 podStartE2EDuration="1.447889883s" podCreationTimestamp="2025-11-25 13:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:00:01.432000509 +0000 UTC m=+1946.603592860" watchObservedRunningTime="2025-11-25 13:00:01.447889883 +0000 UTC m=+1946.619482224" Nov 25 13:00:02 crc kubenswrapper[4675]: I1125 13:00:02.426223 4675 generic.go:334] "Generic (PLEG): container finished" podID="21e88661-854c-481d-b024-c7c87ea9373a" containerID="94a42985894c0f7b7ebedd99c23544767129082f5a16f1c4c556175dfc2854a8" exitCode=0 Nov 25 13:00:02 crc kubenswrapper[4675]: I1125 13:00:02.426307 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" event={"ID":"21e88661-854c-481d-b024-c7c87ea9373a","Type":"ContainerDied","Data":"94a42985894c0f7b7ebedd99c23544767129082f5a16f1c4c556175dfc2854a8"} Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.434777 4675 generic.go:334] "Generic (PLEG): container finished" podID="1e881be8-6492-4fca-9689-e7d63c9336e5" containerID="5552c6d07ffdd5d0cc63eb36a3f3e6b3072e4122b0630d53da919f8b5fd18248" exitCode=0 Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.434882 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" event={"ID":"1e881be8-6492-4fca-9689-e7d63c9336e5","Type":"ContainerDied","Data":"5552c6d07ffdd5d0cc63eb36a3f3e6b3072e4122b0630d53da919f8b5fd18248"} Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.847634 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.996006 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4vwr\" (UniqueName: \"kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr\") pod \"21e88661-854c-481d-b024-c7c87ea9373a\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.996064 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key\") pod \"21e88661-854c-481d-b024-c7c87ea9373a\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " Nov 25 13:00:03 crc kubenswrapper[4675]: I1125 13:00:03.996209 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory\") pod \"21e88661-854c-481d-b024-c7c87ea9373a\" (UID: \"21e88661-854c-481d-b024-c7c87ea9373a\") " Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.002082 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr" (OuterVolumeSpecName: "kube-api-access-s4vwr") pod "21e88661-854c-481d-b024-c7c87ea9373a" (UID: "21e88661-854c-481d-b024-c7c87ea9373a"). InnerVolumeSpecName "kube-api-access-s4vwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.026678 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory" (OuterVolumeSpecName: "inventory") pod "21e88661-854c-481d-b024-c7c87ea9373a" (UID: "21e88661-854c-481d-b024-c7c87ea9373a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.032859 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "21e88661-854c-481d-b024-c7c87ea9373a" (UID: "21e88661-854c-481d-b024-c7c87ea9373a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.098603 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.098894 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4vwr\" (UniqueName: \"kubernetes.io/projected/21e88661-854c-481d-b024-c7c87ea9373a-kube-api-access-s4vwr\") on node \"crc\" DevicePath \"\"" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.098999 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/21e88661-854c-481d-b024-c7c87ea9373a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.444917 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.444987 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vgqfd" event={"ID":"21e88661-854c-481d-b024-c7c87ea9373a","Type":"ContainerDied","Data":"34c8496165b3dd88656dfd232d06c405b806e84e38cec6eab7426cbdd1fb5a70"}
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.445056 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34c8496165b3dd88656dfd232d06c405b806e84e38cec6eab7426cbdd1fb5a70"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.557644 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"]
Nov 25 13:00:04 crc kubenswrapper[4675]: E1125 13:00:04.558122 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21e88661-854c-481d-b024-c7c87ea9373a" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.558149 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="21e88661-854c-481d-b024-c7c87ea9373a" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.558424 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="21e88661-854c-481d-b024-c7c87ea9373a" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.559249 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.565390 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.565594 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.565939 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.575335 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"]
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.581856 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.629011 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.629472 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr5vk\" (UniqueName: \"kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.629651 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.732002 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.732120 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.732265 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr5vk\" (UniqueName: \"kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.738838 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.740361 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.750438 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr5vk\" (UniqueName: \"kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.805912 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.833655 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume\") pod \"1e881be8-6492-4fca-9689-e7d63c9336e5\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") "
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.833932 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume\") pod \"1e881be8-6492-4fca-9689-e7d63c9336e5\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") "
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.833960 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7529\" (UniqueName: \"kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529\") pod \"1e881be8-6492-4fca-9689-e7d63c9336e5\" (UID: \"1e881be8-6492-4fca-9689-e7d63c9336e5\") "
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.834363 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume" (OuterVolumeSpecName: "config-volume") pod "1e881be8-6492-4fca-9689-e7d63c9336e5" (UID: "1e881be8-6492-4fca-9689-e7d63c9336e5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.835219 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e881be8-6492-4fca-9689-e7d63c9336e5-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.837888 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529" (OuterVolumeSpecName: "kube-api-access-b7529") pod "1e881be8-6492-4fca-9689-e7d63c9336e5" (UID: "1e881be8-6492-4fca-9689-e7d63c9336e5"). InnerVolumeSpecName "kube-api-access-b7529". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.840251 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1e881be8-6492-4fca-9689-e7d63c9336e5" (UID: "1e881be8-6492-4fca-9689-e7d63c9336e5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.902933 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.937015 4675 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e881be8-6492-4fca-9689-e7d63c9336e5-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 13:00:04 crc kubenswrapper[4675]: I1125 13:00:04.937056 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7529\" (UniqueName: \"kubernetes.io/projected/1e881be8-6492-4fca-9689-e7d63c9336e5-kube-api-access-b7529\") on node \"crc\" DevicePath \"\""
Nov 25 13:00:05 crc kubenswrapper[4675]: I1125 13:00:05.453635 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr" event={"ID":"1e881be8-6492-4fca-9689-e7d63c9336e5","Type":"ContainerDied","Data":"7f55e2db8e4beafdaffb230fdbaf02d77e51fd5bd157d4d14b300beb9bd09a77"}
Nov 25 13:00:05 crc kubenswrapper[4675]: I1125 13:00:05.453953 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f55e2db8e4beafdaffb230fdbaf02d77e51fd5bd157d4d14b300beb9bd09a77"
Nov 25 13:00:05 crc kubenswrapper[4675]: I1125 13:00:05.454006 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401260-zj7kr"
Nov 25 13:00:05 crc kubenswrapper[4675]: I1125 13:00:05.468449 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"]
Nov 25 13:00:05 crc kubenswrapper[4675]: W1125 13:00:05.474137 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d6b5b05_8b7a_4ff2_8972_0f07e3bb9850.slice/crio-0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845 WatchSource:0}: Error finding container 0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845: Status 404 returned error can't find the container with id 0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845
Nov 25 13:00:06 crc kubenswrapper[4675]: I1125 13:00:06.464750 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5" event={"ID":"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850","Type":"ContainerStarted","Data":"a6ab8f15dc857cbaf13df62add3f19bd7e1cb2509ebaa531c45baec5fbb18b16"}
Nov 25 13:00:06 crc kubenswrapper[4675]: I1125 13:00:06.465378 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5" event={"ID":"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850","Type":"ContainerStarted","Data":"0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845"}
Nov 25 13:00:06 crc kubenswrapper[4675]: I1125 13:00:06.489925 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5" podStartSLOduration=2.075535288 podStartE2EDuration="2.489908994s" podCreationTimestamp="2025-11-25 13:00:04 +0000 UTC" firstStartedPulling="2025-11-25 13:00:05.4793486 +0000 UTC m=+1950.650940941" lastFinishedPulling="2025-11-25 13:00:05.893722306 +0000 UTC m=+1951.065314647" observedRunningTime="2025-11-25 13:00:06.477212204 +0000 UTC m=+1951.648804545" watchObservedRunningTime="2025-11-25 13:00:06.489908994 +0000 UTC m=+1951.661501335"
Nov 25 13:00:08 crc kubenswrapper[4675]: I1125 13:00:08.043194 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-t8hm9"]
Nov 25 13:00:08 crc kubenswrapper[4675]: I1125 13:00:08.054367 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-t8hm9"]
Nov 25 13:00:09 crc kubenswrapper[4675]: I1125 13:00:09.542968 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="857651f8-5af8-4ed3-95b4-20e2f9417d29" path="/var/lib/kubelet/pods/857651f8-5af8-4ed3-95b4-20e2f9417d29/volumes"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.558947 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:00:37 crc kubenswrapper[4675]: E1125 13:00:37.559935 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e881be8-6492-4fca-9689-e7d63c9336e5" containerName="collect-profiles"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.559952 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e881be8-6492-4fca-9689-e7d63c9336e5" containerName="collect-profiles"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.560229 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e881be8-6492-4fca-9689-e7d63c9336e5" containerName="collect-profiles"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.562166 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.575211 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.660908 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvjc7\" (UniqueName: \"kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.661332 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.661462 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.762771 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvjc7\" (UniqueName: \"kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.762981 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.763012 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.763490 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.763726 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.784761 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvjc7\" (UniqueName: \"kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7\") pod \"certified-operators-zdct5\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") " pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:37 crc kubenswrapper[4675]: I1125 13:00:37.885707 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:38 crc kubenswrapper[4675]: I1125 13:00:38.392860 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:00:38 crc kubenswrapper[4675]: W1125 13:00:38.400420 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcbe47fd_f078_4b7b_8fea_7f7f0afd4d4f.slice/crio-bf88d9f9007c99798bfa0f1375c6f1e1790cb195f67bfc186f5827c64d10e22b WatchSource:0}: Error finding container bf88d9f9007c99798bfa0f1375c6f1e1790cb195f67bfc186f5827c64d10e22b: Status 404 returned error can't find the container with id bf88d9f9007c99798bfa0f1375c6f1e1790cb195f67bfc186f5827c64d10e22b
Nov 25 13:00:38 crc kubenswrapper[4675]: I1125 13:00:38.751199 4675 generic.go:334] "Generic (PLEG): container finished" podID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerID="5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd" exitCode=0
Nov 25 13:00:38 crc kubenswrapper[4675]: I1125 13:00:38.751294 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerDied","Data":"5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd"}
Nov 25 13:00:38 crc kubenswrapper[4675]: I1125 13:00:38.751433 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerStarted","Data":"bf88d9f9007c99798bfa0f1375c6f1e1790cb195f67bfc186f5827c64d10e22b"}
Nov 25 13:00:40 crc kubenswrapper[4675]: I1125 13:00:40.771569 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerStarted","Data":"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"}
Nov 25 13:00:46 crc kubenswrapper[4675]: I1125 13:00:46.584783 4675 scope.go:117] "RemoveContainer" containerID="bac85683d946adfc0e86942464942fd73ad3f74ccdfa42d8c60da5e55cea7822"
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.865807 4675 generic.go:334] "Generic (PLEG): container finished" podID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerID="01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0" exitCode=0
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.865870 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerDied","Data":"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"}
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.869648 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.910290 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.912692 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.918126 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.918605 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.918942 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4svx\" (UniqueName: \"kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:47 crc kubenswrapper[4675]: I1125 13:00:47.947056 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.020466 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4svx\" (UniqueName: \"kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.020535 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.020560 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.021100 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.021476 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.043986 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4svx\" (UniqueName: \"kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx\") pod \"redhat-marketplace-pqg64\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") " pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.231417 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.708916 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:00:48 crc kubenswrapper[4675]: W1125 13:00:48.723026 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe2c7b72_f996_422d_8c75_ce5ff4ef3080.slice/crio-a4d72b18253ac5f445b1187fba2900be690563824f8175366236adb6c2b99671 WatchSource:0}: Error finding container a4d72b18253ac5f445b1187fba2900be690563824f8175366236adb6c2b99671: Status 404 returned error can't find the container with id a4d72b18253ac5f445b1187fba2900be690563824f8175366236adb6c2b99671
Nov 25 13:00:48 crc kubenswrapper[4675]: I1125 13:00:48.873467 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerStarted","Data":"a4d72b18253ac5f445b1187fba2900be690563824f8175366236adb6c2b99671"}
Nov 25 13:00:49 crc kubenswrapper[4675]: I1125 13:00:49.882446 4675 generic.go:334] "Generic (PLEG): container finished" podID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerID="f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0" exitCode=0
Nov 25 13:00:49 crc kubenswrapper[4675]: I1125 13:00:49.882506 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerDied","Data":"f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0"}
Nov 25 13:00:49 crc kubenswrapper[4675]: I1125 13:00:49.885895 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerStarted","Data":"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"}
Nov 25 13:00:49 crc kubenswrapper[4675]: I1125 13:00:49.925641 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zdct5" podStartSLOduration=3.026207577 podStartE2EDuration="12.925626357s" podCreationTimestamp="2025-11-25 13:00:37 +0000 UTC" firstStartedPulling="2025-11-25 13:00:38.752674195 +0000 UTC m=+1983.924266536" lastFinishedPulling="2025-11-25 13:00:48.652092975 +0000 UTC m=+1993.823685316" observedRunningTime="2025-11-25 13:00:49.923925541 +0000 UTC m=+1995.095517882" watchObservedRunningTime="2025-11-25 13:00:49.925626357 +0000 UTC m=+1995.097218698"
Nov 25 13:00:50 crc kubenswrapper[4675]: I1125 13:00:50.896154 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerStarted","Data":"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"}
Nov 25 13:00:51 crc kubenswrapper[4675]: I1125 13:00:51.907107 4675 generic.go:334] "Generic (PLEG): container finished" podID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerID="dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e" exitCode=0
Nov 25 13:00:51 crc kubenswrapper[4675]: I1125 13:00:51.907158 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerDied","Data":"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"}
Nov 25 13:00:52 crc kubenswrapper[4675]: I1125 13:00:52.917345 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerStarted","Data":"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"}
Nov 25 13:00:52 crc kubenswrapper[4675]: I1125 13:00:52.939120 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pqg64" podStartSLOduration=3.534479483 podStartE2EDuration="5.939100219s" podCreationTimestamp="2025-11-25 13:00:47 +0000 UTC" firstStartedPulling="2025-11-25 13:00:49.884498646 +0000 UTC m=+1995.056090977" lastFinishedPulling="2025-11-25 13:00:52.289119372 +0000 UTC m=+1997.460711713" observedRunningTime="2025-11-25 13:00:52.934054436 +0000 UTC m=+1998.105646797" watchObservedRunningTime="2025-11-25 13:00:52.939100219 +0000 UTC m=+1998.110692570"
Nov 25 13:00:57 crc kubenswrapper[4675]: I1125 13:00:57.886593 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:57 crc kubenswrapper[4675]: I1125 13:00:57.887201 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:57 crc kubenswrapper[4675]: I1125 13:00:57.935348 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:57 crc kubenswrapper[4675]: I1125 13:00:57.957248 4675 generic.go:334] "Generic (PLEG): container finished" podID="3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" containerID="a6ab8f15dc857cbaf13df62add3f19bd7e1cb2509ebaa531c45baec5fbb18b16" exitCode=0
Nov 25 13:00:57 crc kubenswrapper[4675]: I1125 13:00:57.958348 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5" event={"ID":"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850","Type":"ContainerDied","Data":"a6ab8f15dc857cbaf13df62add3f19bd7e1cb2509ebaa531c45baec5fbb18b16"}
Nov 25 13:00:58 crc kubenswrapper[4675]: I1125 13:00:58.013593 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:00:58 crc kubenswrapper[4675]: I1125 13:00:58.182776 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:00:58 crc kubenswrapper[4675]: I1125 13:00:58.232300 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:58 crc kubenswrapper[4675]: I1125 13:00:58.233084 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:58 crc kubenswrapper[4675]: I1125 13:00:58.286449 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.021969 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.386163 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.539334 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory\") pod \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") "
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.539430 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr5vk\" (UniqueName: \"kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk\") pod \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") "
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.539656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key\") pod \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\" (UID: \"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850\") "
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.561101 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk" (OuterVolumeSpecName: "kube-api-access-lr5vk") pod "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" (UID: "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850"). InnerVolumeSpecName "kube-api-access-lr5vk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.574416 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory" (OuterVolumeSpecName: "inventory") pod "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" (UID: "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.580819 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" (UID: "3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.642538 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.642569 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 13:00:59 crc kubenswrapper[4675]: I1125 13:00:59.642583 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr5vk\" (UniqueName: \"kubernetes.io/projected/3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850-kube-api-access-lr5vk\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.004814 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5" event={"ID":"3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850","Type":"ContainerDied","Data":"0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845"}
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.005145 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b6080bdd614fe47cfef27a38d83d25b642c7a0c974068e626726d8b803a0845"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.004941 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zdct5" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="registry-server" containerID="cri-o://a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746" gracePeriod=2
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.005332 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.104129 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mkh7c"]
Nov 25 13:01:00 crc kubenswrapper[4675]: E1125 13:01:00.108140 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.108273 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.108664 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.109632 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.111597 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mkh7c"]
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.116731 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.117119 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.117553 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.117671 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.175584 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401261-blc64"]
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.178087 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.194608 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401261-blc64"]
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.257778 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn6pj\" (UniqueName: \"kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.257927 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.258122 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359383 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359519 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359571 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359595 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359631 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4s24v\" (UniqueName: \"kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359664 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.359702 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn6pj\" (UniqueName: \"kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.388643 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.391264 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.393244 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn6pj\" (UniqueName: \"kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj\") pod \"ssh-known-hosts-edpm-deployment-mkh7c\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.460951 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.461029 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.461066 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4s24v\" (UniqueName: \"kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.461089 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.470449 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.475020 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.475442 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.487686 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4s24v\" (UniqueName: \"kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v\") pod \"keystone-cron-29401261-blc64\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.501637 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.533906 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401261-blc64"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.594647 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.692124 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.875099 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvjc7\" (UniqueName: \"kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7\") pod \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") "
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.875562 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content\") pod \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") "
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.875666 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities\") pod \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\" (UID: \"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f\") "
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.876946 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities" (OuterVolumeSpecName: "utilities") pod "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" (UID: "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.881031 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7" (OuterVolumeSpecName: "kube-api-access-kvjc7") pod "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" (UID: "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f"). InnerVolumeSpecName "kube-api-access-kvjc7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.929646 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" (UID: "bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.977460 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvjc7\" (UniqueName: \"kubernetes.io/projected/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-kube-api-access-kvjc7\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.977496 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:00 crc kubenswrapper[4675]: I1125 13:01:00.977509 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.015248 4675 generic.go:334] "Generic (PLEG): container finished" podID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerID="a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746" exitCode=0
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.016295 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zdct5"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.021014 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerDied","Data":"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"}
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.021082 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zdct5" event={"ID":"bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f","Type":"ContainerDied","Data":"bf88d9f9007c99798bfa0f1375c6f1e1790cb195f67bfc186f5827c64d10e22b"}
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.021106 4675 scope.go:117] "RemoveContainer" containerID="a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.057170 4675 scope.go:117] "RemoveContainer" containerID="01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.060049 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.072426 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zdct5"]
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.078822 4675 scope.go:117] "RemoveContainer" containerID="5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.100626 4675 scope.go:117] "RemoveContainer" containerID="a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"
Nov 25 13:01:01 crc kubenswrapper[4675]: E1125 13:01:01.101449 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746\": container with ID starting with a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746 not found: ID does not exist" containerID="a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.101494 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746"} err="failed to get container status \"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746\": rpc error: code = NotFound desc = could not find container \"a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746\": container with ID starting with a592a16c1093bb4668e40e029d4910720474817b5d942da6817627f0f8fd2746 not found: ID does not exist"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.101517 4675 scope.go:117] "RemoveContainer" containerID="01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"
Nov 25 13:01:01 crc kubenswrapper[4675]: E1125 13:01:01.102634 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0\": container with ID starting with 01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0 not found: ID does not exist" containerID="01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.102684 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0"} err="failed to get container status \"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0\": rpc error: code = NotFound desc = could not find container \"01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0\": container with ID starting with 01976ef36ae8165f512cf6ef8c3f95f9ed6b8d46ac32552bd91d26179d6f64a0 not found: ID does not exist"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.102718 4675 scope.go:117] "RemoveContainer" containerID="5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd"
Nov 25 13:01:01 crc kubenswrapper[4675]: E1125 13:01:01.103366 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd\": container with ID starting with 5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd not found: ID does not exist" containerID="5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.103394 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd"} err="failed to get container status \"5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd\": rpc error: code = NotFound desc = could not find container \"5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd\": container with ID starting with 5b99a0aaea035fc067bf83b578eb7022a2c92e754c0f47c133430a324c019bcd not found: ID does not exist"
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.153026 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401261-blc64"]
Nov 25 13:01:01 crc kubenswrapper[4675]: W1125 13:01:01.158234 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1779f11_7333_4094_a1ee_b509cc09da52.slice/crio-d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e WatchSource:0}: Error finding container d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e: Status 404 returned error can't find the container with id d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.168945 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mkh7c"]
Nov 25 13:01:01 crc kubenswrapper[4675]: W1125 13:01:01.174415 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0808241a_edce_45b6_ae18_7b0356549cf6.slice/crio-a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7 WatchSource:0}: Error finding container a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7: Status 404 returned error can't find the container with id a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7
Nov 25 13:01:01 crc kubenswrapper[4675]: I1125 13:01:01.542886 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" path="/var/lib/kubelet/pods/bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f/volumes"
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.032997 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" event={"ID":"0808241a-edce-45b6-ae18-7b0356549cf6","Type":"ContainerStarted","Data":"a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7"}
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.035007 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401261-blc64" event={"ID":"b1779f11-7333-4094-a1ee-b509cc09da52","Type":"ContainerStarted","Data":"bca22891e756c84944ad6577a18d4075030f3a7c0c184c424bf04e55dd6a92e7"}
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.035040 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401261-blc64" event={"ID":"b1779f11-7333-4094-a1ee-b509cc09da52","Type":"ContainerStarted","Data":"d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e"}
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.035264 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pqg64" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="registry-server" containerID="cri-o://ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244" gracePeriod=2
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.063520 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401261-blc64" podStartSLOduration=2.063499066 podStartE2EDuration="2.063499066s" podCreationTimestamp="2025-11-25 13:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:01:02.054954879 +0000 UTC m=+2007.226547220" watchObservedRunningTime="2025-11-25 13:01:02.063499066 +0000 UTC m=+2007.235091397"
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.716880 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.838528 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities\") pod \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") "
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.839125 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4svx\" (UniqueName: \"kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx\") pod \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") "
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.839177 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content\") pod \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\" (UID: \"be2c7b72-f996-422d-8c75-ce5ff4ef3080\") "
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.839502 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities" (OuterVolumeSpecName: "utilities") pod "be2c7b72-f996-422d-8c75-ce5ff4ef3080" (UID: "be2c7b72-f996-422d-8c75-ce5ff4ef3080"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.839719 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.849287 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx" (OuterVolumeSpecName: "kube-api-access-k4svx") pod "be2c7b72-f996-422d-8c75-ce5ff4ef3080" (UID: "be2c7b72-f996-422d-8c75-ce5ff4ef3080"). InnerVolumeSpecName "kube-api-access-k4svx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.865726 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be2c7b72-f996-422d-8c75-ce5ff4ef3080" (UID: "be2c7b72-f996-422d-8c75-ce5ff4ef3080"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.941981 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4svx\" (UniqueName: \"kubernetes.io/projected/be2c7b72-f996-422d-8c75-ce5ff4ef3080-kube-api-access-k4svx\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:02 crc kubenswrapper[4675]: I1125 13:01:02.942020 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be2c7b72-f996-422d-8c75-ce5ff4ef3080-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.048267 4675 generic.go:334] "Generic (PLEG): container finished" podID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerID="ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244" exitCode=0
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.048351 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerDied","Data":"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"}
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.048368 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pqg64"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.048399 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pqg64" event={"ID":"be2c7b72-f996-422d-8c75-ce5ff4ef3080","Type":"ContainerDied","Data":"a4d72b18253ac5f445b1187fba2900be690563824f8175366236adb6c2b99671"}
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.048418 4675 scope.go:117] "RemoveContainer" containerID="ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.082029 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.086110 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pqg64"]
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.222083 4675 scope.go:117] "RemoveContainer" containerID="dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.443292 4675 scope.go:117] "RemoveContainer" containerID="f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.499431 4675 scope.go:117] "RemoveContainer" containerID="ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"
Nov 25 13:01:03 crc kubenswrapper[4675]: E1125 13:01:03.500342 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244\": container with ID starting with ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244 not found: ID does not exist" containerID="ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.500380 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244"} err="failed to get container status \"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244\": rpc error: code = NotFound desc = could not find container \"ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244\": container with ID starting with ff723a36946dced874a08bd478490a68cb295a7110fbdc30558b12b01e844244 not found: ID does not exist"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.500411 4675 scope.go:117] "RemoveContainer" containerID="dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"
Nov 25 13:01:03 crc kubenswrapper[4675]: E1125 13:01:03.500803 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e\": container with ID starting with dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e not found: ID does not exist" containerID="dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.500844 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e"} err="failed to get container status \"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e\": rpc error: code = NotFound desc = could not find container \"dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e\": container with ID starting with dd99cfb31a5c58b68657e45873a7a49024f47b090a3d624408a3a5c5e8ba4a4e not found: ID does not exist"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.500863 4675 scope.go:117] "RemoveContainer" containerID="f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0"
Nov 25 13:01:03 crc kubenswrapper[4675]: E1125 13:01:03.501142 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0\": container with ID starting with f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0 not found: ID does not exist" containerID="f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.501168 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0"} err="failed to get container status \"f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0\": rpc error: code = NotFound desc = could not find container \"f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0\": container with ID starting with f4c05f7263381733278876151ca1194d3c82d3216cec369d187e91e07ae427d0 not found: ID does not exist"
Nov 25 13:01:03 crc kubenswrapper[4675]: I1125 13:01:03.542718 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" path="/var/lib/kubelet/pods/be2c7b72-f996-422d-8c75-ce5ff4ef3080/volumes"
Nov 25 13:01:04 crc kubenswrapper[4675]: I1125 13:01:04.061867 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" event={"ID":"0808241a-edce-45b6-ae18-7b0356549cf6","Type":"ContainerStarted","Data":"853b0943446075a2267a0c57dbec8f06394f83550ba2884ba39359761ade5c98"}
Nov 25 13:01:04 crc kubenswrapper[4675]: I1125 13:01:04.086804 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration"
pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" podStartSLOduration=1.822755578 podStartE2EDuration="4.086786835s" podCreationTimestamp="2025-11-25 13:01:00 +0000 UTC" firstStartedPulling="2025-11-25 13:01:01.179864898 +0000 UTC m=+2006.351457239" lastFinishedPulling="2025-11-25 13:01:03.443896155 +0000 UTC m=+2008.615488496" observedRunningTime="2025-11-25 13:01:04.086458454 +0000 UTC m=+2009.258050805" watchObservedRunningTime="2025-11-25 13:01:04.086786835 +0000 UTC m=+2009.258379176" Nov 25 13:01:06 crc kubenswrapper[4675]: I1125 13:01:06.078395 4675 generic.go:334] "Generic (PLEG): container finished" podID="b1779f11-7333-4094-a1ee-b509cc09da52" containerID="bca22891e756c84944ad6577a18d4075030f3a7c0c184c424bf04e55dd6a92e7" exitCode=0 Nov 25 13:01:06 crc kubenswrapper[4675]: I1125 13:01:06.078482 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401261-blc64" event={"ID":"b1779f11-7333-4094-a1ee-b509cc09da52","Type":"ContainerDied","Data":"bca22891e756c84944ad6577a18d4075030f3a7c0c184c424bf04e55dd6a92e7"} Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.423539 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401261-blc64" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.526368 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4s24v\" (UniqueName: \"kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v\") pod \"b1779f11-7333-4094-a1ee-b509cc09da52\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.526475 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data\") pod \"b1779f11-7333-4094-a1ee-b509cc09da52\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.526501 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle\") pod \"b1779f11-7333-4094-a1ee-b509cc09da52\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.526632 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys\") pod \"b1779f11-7333-4094-a1ee-b509cc09da52\" (UID: \"b1779f11-7333-4094-a1ee-b509cc09da52\") " Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.543747 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v" (OuterVolumeSpecName: "kube-api-access-4s24v") pod "b1779f11-7333-4094-a1ee-b509cc09da52" (UID: "b1779f11-7333-4094-a1ee-b509cc09da52"). InnerVolumeSpecName "kube-api-access-4s24v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.557658 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b1779f11-7333-4094-a1ee-b509cc09da52" (UID: "b1779f11-7333-4094-a1ee-b509cc09da52"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.592623 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1779f11-7333-4094-a1ee-b509cc09da52" (UID: "b1779f11-7333-4094-a1ee-b509cc09da52"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.593144 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data" (OuterVolumeSpecName: "config-data") pod "b1779f11-7333-4094-a1ee-b509cc09da52" (UID: "b1779f11-7333-4094-a1ee-b509cc09da52"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.629144 4675 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.629178 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4s24v\" (UniqueName: \"kubernetes.io/projected/b1779f11-7333-4094-a1ee-b509cc09da52-kube-api-access-4s24v\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.629188 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:07 crc kubenswrapper[4675]: I1125 13:01:07.629197 4675 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1779f11-7333-4094-a1ee-b509cc09da52-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:08 crc kubenswrapper[4675]: I1125 13:01:08.097280 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401261-blc64" event={"ID":"b1779f11-7333-4094-a1ee-b509cc09da52","Type":"ContainerDied","Data":"d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e"} Nov 25 13:01:08 crc kubenswrapper[4675]: I1125 13:01:08.097327 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2b6234492ed29b8b597d6faf4ad572fdf0f114527e75d43375d4a312f236c6e" Nov 25 13:01:08 crc kubenswrapper[4675]: I1125 13:01:08.097731 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401261-blc64" Nov 25 13:01:11 crc kubenswrapper[4675]: I1125 13:01:11.138561 4675 generic.go:334] "Generic (PLEG): container finished" podID="0808241a-edce-45b6-ae18-7b0356549cf6" containerID="853b0943446075a2267a0c57dbec8f06394f83550ba2884ba39359761ade5c98" exitCode=0 Nov 25 13:01:11 crc kubenswrapper[4675]: I1125 13:01:11.138744 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" event={"ID":"0808241a-edce-45b6-ae18-7b0356549cf6","Type":"ContainerDied","Data":"853b0943446075a2267a0c57dbec8f06394f83550ba2884ba39359761ade5c98"} Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.588840 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.723656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn6pj\" (UniqueName: \"kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj\") pod \"0808241a-edce-45b6-ae18-7b0356549cf6\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.723712 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam\") pod \"0808241a-edce-45b6-ae18-7b0356549cf6\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.723784 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0\") pod \"0808241a-edce-45b6-ae18-7b0356549cf6\" (UID: \"0808241a-edce-45b6-ae18-7b0356549cf6\") " Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.733273 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj" (OuterVolumeSpecName: "kube-api-access-xn6pj") pod "0808241a-edce-45b6-ae18-7b0356549cf6" (UID: "0808241a-edce-45b6-ae18-7b0356549cf6"). InnerVolumeSpecName "kube-api-access-xn6pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.748951 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "0808241a-edce-45b6-ae18-7b0356549cf6" (UID: "0808241a-edce-45b6-ae18-7b0356549cf6"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.750605 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0808241a-edce-45b6-ae18-7b0356549cf6" (UID: "0808241a-edce-45b6-ae18-7b0356549cf6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.825502 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn6pj\" (UniqueName: \"kubernetes.io/projected/0808241a-edce-45b6-ae18-7b0356549cf6-kube-api-access-xn6pj\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.825530 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:12 crc kubenswrapper[4675]: I1125 13:01:12.825539 4675 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0808241a-edce-45b6-ae18-7b0356549cf6-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.157870 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" event={"ID":"0808241a-edce-45b6-ae18-7b0356549cf6","Type":"ContainerDied","Data":"a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7"} Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.158214 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1473feb7a9233d7eeb98da3dc0f93aaf352e83aed6e56532b557111277f67c7" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.157990 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mkh7c" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.229659 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t"] Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230016 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230028 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230041 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0808241a-edce-45b6-ae18-7b0356549cf6" containerName="ssh-known-hosts-edpm-deployment" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230048 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0808241a-edce-45b6-ae18-7b0356549cf6" containerName="ssh-known-hosts-edpm-deployment" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230070 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="extract-content" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230077 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="extract-content" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230087 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1779f11-7333-4094-a1ee-b509cc09da52" containerName="keystone-cron" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230092 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1779f11-7333-4094-a1ee-b509cc09da52" containerName="keystone-cron" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230108 4675 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="extract-utilities" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230113 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="extract-utilities" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230124 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="extract-content" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230129 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="extract-content" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230140 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230145 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: E1125 13:01:13.230165 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="extract-utilities" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230171 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="extract-utilities" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230329 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="0808241a-edce-45b6-ae18-7b0356549cf6" containerName="ssh-known-hosts-edpm-deployment" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230337 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcbe47fd-f078-4b7b-8fea-7f7f0afd4d4f" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230347 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1779f11-7333-4094-a1ee-b509cc09da52" containerName="keystone-cron" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230359 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="be2c7b72-f996-422d-8c75-ce5ff4ef3080" containerName="registry-server" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.230987 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.233445 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.234158 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.234358 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.234633 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.280475 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t"] Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.336967 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.337028 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.337142 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9sd4\" (UniqueName: \"kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.439249 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.439329 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.439466 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9sd4\" (UniqueName: \"kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.446404 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.447751 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.467767 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9sd4\" (UniqueName: \"kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-87t9t\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.546677 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.661937 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:01:13 crc kubenswrapper[4675]: I1125 13:01:13.662246 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:01:14 crc kubenswrapper[4675]: I1125 13:01:14.096542 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t"] Nov 25 13:01:14 crc kubenswrapper[4675]: I1125 13:01:14.169281 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" event={"ID":"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8","Type":"ContainerStarted","Data":"3672d04e83920782929cc0c8d15bff96ce0cc35a2224e788adc9c522db3f3cc6"} Nov 25 13:01:15 crc kubenswrapper[4675]: I1125 13:01:15.177649 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" event={"ID":"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8","Type":"ContainerStarted","Data":"5f4ccca050fb322b992cea71a1a06c49cc7886d68ca1ca96610093022a57f712"} Nov 25 13:01:15 crc kubenswrapper[4675]: I1125 13:01:15.198516 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" podStartSLOduration=1.726173196 podStartE2EDuration="2.198490646s" podCreationTimestamp="2025-11-25 13:01:13 +0000 UTC" firstStartedPulling="2025-11-25 13:01:14.102480278 +0000 UTC m=+2019.274072619" lastFinishedPulling="2025-11-25 
13:01:14.574797718 +0000 UTC m=+2019.746390069" observedRunningTime="2025-11-25 13:01:15.196005336 +0000 UTC m=+2020.367597687" watchObservedRunningTime="2025-11-25 13:01:15.198490646 +0000 UTC m=+2020.370082997" Nov 25 13:01:24 crc kubenswrapper[4675]: I1125 13:01:24.249969 4675 generic.go:334] "Generic (PLEG): container finished" podID="58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" containerID="5f4ccca050fb322b992cea71a1a06c49cc7886d68ca1ca96610093022a57f712" exitCode=0 Nov 25 13:01:24 crc kubenswrapper[4675]: I1125 13:01:24.250045 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" event={"ID":"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8","Type":"ContainerDied","Data":"5f4ccca050fb322b992cea71a1a06c49cc7886d68ca1ca96610093022a57f712"} Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.665671 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.776221 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key\") pod \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.776412 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9sd4\" (UniqueName: \"kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4\") pod \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.776467 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory\") pod \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\" (UID: \"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8\") " Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.783248 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4" (OuterVolumeSpecName: "kube-api-access-h9sd4") pod "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" (UID: "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8"). InnerVolumeSpecName "kube-api-access-h9sd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.810837 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" (UID: "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.814607 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory" (OuterVolumeSpecName: "inventory") pod "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" (UID: "58949bc5-8d5f-4d04-bf70-eb2e0a55cda8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.880909 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9sd4\" (UniqueName: \"kubernetes.io/projected/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-kube-api-access-h9sd4\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.881207 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:25 crc kubenswrapper[4675]: I1125 13:01:25.881276 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58949bc5-8d5f-4d04-bf70-eb2e0a55cda8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.270534 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" event={"ID":"58949bc5-8d5f-4d04-bf70-eb2e0a55cda8","Type":"ContainerDied","Data":"3672d04e83920782929cc0c8d15bff96ce0cc35a2224e788adc9c522db3f3cc6"} Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.270583 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3672d04e83920782929cc0c8d15bff96ce0cc35a2224e788adc9c522db3f3cc6" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.270618 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-87t9t" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.354195 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5"] Nov 25 13:01:26 crc kubenswrapper[4675]: E1125 13:01:26.368278 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.368308 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.368554 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="58949bc5-8d5f-4d04-bf70-eb2e0a55cda8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.372235 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5"] Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.372331 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.375640 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.375916 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.376081 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.377314 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.491596 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lprcj\" (UniqueName: \"kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.492060 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.492238 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.593727 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lprcj\" (UniqueName: \"kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.593800 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.593858 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.599343 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.604180 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.612796 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lprcj\" (UniqueName: \"kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:26 crc kubenswrapper[4675]: I1125 13:01:26.698428 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:27 crc kubenswrapper[4675]: I1125 13:01:27.197627 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5"] Nov 25 13:01:27 crc kubenswrapper[4675]: I1125 13:01:27.280843 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" event={"ID":"13de1b17-7309-4325-9264-52182799c3be","Type":"ContainerStarted","Data":"d1f36a449d4e6d3cc08d8e85d83d328fe8bd2a428390811657ebc8d15202c623"} Nov 25 13:01:28 crc kubenswrapper[4675]: I1125 13:01:28.289212 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" event={"ID":"13de1b17-7309-4325-9264-52182799c3be","Type":"ContainerStarted","Data":"7330346e95318d79552bc03e248b3a3bafc1a54287def6ab0029f67e6df05725"} Nov 25 13:01:28 crc kubenswrapper[4675]: I1125 13:01:28.307728 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" podStartSLOduration=1.909801108 podStartE2EDuration="2.307707971s" podCreationTimestamp="2025-11-25 13:01:26 +0000 UTC" firstStartedPulling="2025-11-25 13:01:27.210202435 +0000 UTC m=+2032.381794776" lastFinishedPulling="2025-11-25 13:01:27.608109278 +0000 UTC m=+2032.779701639" observedRunningTime="2025-11-25 13:01:28.304616441 +0000 UTC m=+2033.476208782" watchObservedRunningTime="2025-11-25 13:01:28.307707971 +0000 UTC m=+2033.479300312" Nov 25 13:01:37 crc kubenswrapper[4675]: I1125 13:01:37.361422 4675 generic.go:334] "Generic (PLEG): container finished" podID="13de1b17-7309-4325-9264-52182799c3be" containerID="7330346e95318d79552bc03e248b3a3bafc1a54287def6ab0029f67e6df05725" exitCode=0 Nov 25 13:01:37 crc kubenswrapper[4675]: I1125 13:01:37.361500 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" event={"ID":"13de1b17-7309-4325-9264-52182799c3be","Type":"ContainerDied","Data":"7330346e95318d79552bc03e248b3a3bafc1a54287def6ab0029f67e6df05725"} Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.778496 4675 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.924010 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key\") pod \"13de1b17-7309-4325-9264-52182799c3be\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.924230 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory\") pod \"13de1b17-7309-4325-9264-52182799c3be\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.924304 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lprcj\" (UniqueName: \"kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj\") pod \"13de1b17-7309-4325-9264-52182799c3be\" (UID: \"13de1b17-7309-4325-9264-52182799c3be\") " Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.933603 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj" (OuterVolumeSpecName: "kube-api-access-lprcj") pod "13de1b17-7309-4325-9264-52182799c3be" (UID: "13de1b17-7309-4325-9264-52182799c3be"). InnerVolumeSpecName "kube-api-access-lprcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.950189 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory" (OuterVolumeSpecName: "inventory") pod "13de1b17-7309-4325-9264-52182799c3be" (UID: "13de1b17-7309-4325-9264-52182799c3be"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:38 crc kubenswrapper[4675]: I1125 13:01:38.957184 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "13de1b17-7309-4325-9264-52182799c3be" (UID: "13de1b17-7309-4325-9264-52182799c3be"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.026441 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.026470 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13de1b17-7309-4325-9264-52182799c3be-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.026479 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lprcj\" (UniqueName: \"kubernetes.io/projected/13de1b17-7309-4325-9264-52182799c3be-kube-api-access-lprcj\") on node \"crc\" DevicePath \"\"" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.380852 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" event={"ID":"13de1b17-7309-4325-9264-52182799c3be","Type":"ContainerDied","Data":"d1f36a449d4e6d3cc08d8e85d83d328fe8bd2a428390811657ebc8d15202c623"} Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.380897 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1f36a449d4e6d3cc08d8e85d83d328fe8bd2a428390811657ebc8d15202c623" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.380957 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.468445 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf"] Nov 25 13:01:39 crc kubenswrapper[4675]: E1125 13:01:39.468962 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13de1b17-7309-4325-9264-52182799c3be" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.468987 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="13de1b17-7309-4325-9264-52182799c3be" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.469201 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="13de1b17-7309-4325-9264-52182799c3be" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.469915 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.472289 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.476548 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.476758 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.476780 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.476654 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.476956 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.477030 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.477120 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.485351 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf"] Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.645896 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.645980 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646028 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646060 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646115 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646146 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646187 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646217 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646284 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljp66\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646345 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646393 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646423 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646467 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.646496 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.748222 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.749418 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.749458 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.749928 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 
13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.749995 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750031 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750063 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750511 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750572 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljp66\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750622 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750666 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750694 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750721 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.750757 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.756184 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.756702 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.757091 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.757246 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.757527 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc 
kubenswrapper[4675]: I1125 13:01:39.758976 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.759465 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.760560 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.761630 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.762255 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.763217 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.767494 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.771836 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.776223 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljp66\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-c46tf\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:39 crc kubenswrapper[4675]: I1125 13:01:39.814001 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:01:40 crc kubenswrapper[4675]: I1125 13:01:40.329258 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf"] Nov 25 13:01:40 crc kubenswrapper[4675]: I1125 13:01:40.389675 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" event={"ID":"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46","Type":"ContainerStarted","Data":"1015152e0b6fb16e4b58e5f83bcd4c68786dc4084fc215672d830632723afb08"} Nov 25 13:01:41 crc kubenswrapper[4675]: I1125 13:01:41.436825 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" event={"ID":"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46","Type":"ContainerStarted","Data":"179c14e895f17bb74e148767b2109ad38dfc677802b173cb6a638028b271bf84"} Nov 25 13:01:41 crc kubenswrapper[4675]: I1125 13:01:41.477534 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" podStartSLOduration=2.089312817 podStartE2EDuration="2.477513066s" podCreationTimestamp="2025-11-25 13:01:39 +0000 UTC" firstStartedPulling="2025-11-25 13:01:40.336784991 +0000 UTC m=+2045.508377332" lastFinishedPulling="2025-11-25 13:01:40.72498521 +0000 UTC m=+2045.896577581" observedRunningTime="2025-11-25 13:01:41.471429449 +0000 UTC m=+2046.643021790" watchObservedRunningTime="2025-11-25 13:01:41.477513066 +0000 UTC m=+2046.649105417" Nov 25 13:01:43 crc kubenswrapper[4675]: I1125 13:01:43.661694 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:01:43 crc kubenswrapper[4675]: I1125 13:01:43.661753 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.109800 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.114167 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.129588 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.240551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdwx9\" (UniqueName: \"kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.240747 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.240794 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.307279 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.320091 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.343058 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdwx9\" (UniqueName: \"kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.343198 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.343229 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.344187 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.359230 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.388178 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.404954 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdwx9\" (UniqueName: \"kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9\") pod \"redhat-operators-rzcbd\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.443244 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.444626 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.444704 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vckgx\" (UniqueName: \"kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.444726 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.546004 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.546447 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vckgx\" (UniqueName: \"kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.546481 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.547139 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.548698 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.574849 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vckgx\" (UniqueName: \"kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx\") pod \"community-operators-tk5lf\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.663994 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:01:54 crc kubenswrapper[4675]: I1125 13:01:54.888997 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:01:54 crc kubenswrapper[4675]: W1125 13:01:54.929586 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2d84373_af8f_4fb7_a44a_ddcd716fda33.slice/crio-11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668 WatchSource:0}: Error finding container 11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668: Status 404 returned error can't find the container with id 11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668 Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.146638 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.558196 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerID="23b79ce6eba89c02befc098d3eec0957a09f1347f7624e639a2f791f5944d2e9" exitCode=0 Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.558393 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerDied","Data":"23b79ce6eba89c02befc098d3eec0957a09f1347f7624e639a2f791f5944d2e9"} Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.558975 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerStarted","Data":"11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668"} Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.561554 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerStarted","Data":"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a"} Nov 25 13:01:55 crc kubenswrapper[4675]: I1125 13:01:55.561586 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" 
event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerStarted","Data":"763faf8998a0c73eca80602db4e90f824d52bc18580dad6ec2ff881314d4327f"} Nov 25 13:01:56 crc kubenswrapper[4675]: I1125 13:01:56.572190 4675 generic.go:334] "Generic (PLEG): container finished" podID="186b5298-d646-4785-a460-0bb0e9227cd0" containerID="4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a" exitCode=0 Nov 25 13:01:56 crc kubenswrapper[4675]: I1125 13:01:56.572295 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerDied","Data":"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a"} Nov 25 13:01:56 crc kubenswrapper[4675]: I1125 13:01:56.577798 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerStarted","Data":"73ea3eb665b2b2af5941acbae075aee7ae6b7ccf5d9c97ad0c5e3cd3f3538127"} Nov 25 13:01:58 crc kubenswrapper[4675]: I1125 13:01:58.595200 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerStarted","Data":"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26"} Nov 25 13:02:00 crc kubenswrapper[4675]: I1125 13:02:00.612858 4675 generic.go:334] "Generic (PLEG): container finished" podID="186b5298-d646-4785-a460-0bb0e9227cd0" containerID="8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26" exitCode=0 Nov 25 13:02:00 crc kubenswrapper[4675]: I1125 13:02:00.612898 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerDied","Data":"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26"} Nov 25 13:02:01 crc kubenswrapper[4675]: I1125 13:02:01.622855 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerID="73ea3eb665b2b2af5941acbae075aee7ae6b7ccf5d9c97ad0c5e3cd3f3538127" exitCode=0 Nov 25 13:02:01 crc kubenswrapper[4675]: I1125 13:02:01.622903 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerDied","Data":"73ea3eb665b2b2af5941acbae075aee7ae6b7ccf5d9c97ad0c5e3cd3f3538127"} Nov 25 13:02:01 crc kubenswrapper[4675]: I1125 13:02:01.629350 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerStarted","Data":"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82"} Nov 25 13:02:01 crc kubenswrapper[4675]: I1125 13:02:01.668579 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tk5lf" podStartSLOduration=3.148884055 podStartE2EDuration="7.668554947s" podCreationTimestamp="2025-11-25 13:01:54 +0000 UTC" firstStartedPulling="2025-11-25 13:01:56.576635861 +0000 UTC m=+2061.748228202" lastFinishedPulling="2025-11-25 13:02:01.096306753 +0000 UTC m=+2066.267899094" observedRunningTime="2025-11-25 13:02:01.665521689 +0000 UTC m=+2066.837114050" watchObservedRunningTime="2025-11-25 13:02:01.668554947 +0000 UTC m=+2066.840147298" Nov 25 13:02:02 crc kubenswrapper[4675]: I1125 13:02:02.645714 4675 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerStarted","Data":"63c9e8e464cb2684630f7b559f9e4365f6b131306f0da3a2b921b19a720cf406"} Nov 25 13:02:02 crc kubenswrapper[4675]: I1125 13:02:02.663548 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rzcbd" podStartSLOduration=2.126912582 podStartE2EDuration="8.663534337s" podCreationTimestamp="2025-11-25 13:01:54 +0000 UTC" firstStartedPulling="2025-11-25 13:01:55.560428914 +0000 UTC m=+2060.732021255" lastFinishedPulling="2025-11-25 13:02:02.097050669 +0000 UTC m=+2067.268643010" observedRunningTime="2025-11-25 13:02:02.660654743 +0000 UTC m=+2067.832247084" watchObservedRunningTime="2025-11-25 13:02:02.663534337 +0000 UTC m=+2067.835126678" Nov 25 13:02:04 crc kubenswrapper[4675]: I1125 13:02:04.445193 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:04 crc kubenswrapper[4675]: I1125 13:02:04.447279 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:04 crc kubenswrapper[4675]: I1125 13:02:04.665243 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:04 crc kubenswrapper[4675]: I1125 13:02:04.665582 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:04 crc kubenswrapper[4675]: I1125 13:02:04.715483 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:05 crc kubenswrapper[4675]: I1125 13:02:05.523338 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rzcbd" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" probeResult="failure" output=< Nov 25 13:02:05 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:02:05 crc kubenswrapper[4675]: > Nov 25 13:02:13 crc kubenswrapper[4675]: I1125 13:02:13.662430 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:02:13 crc kubenswrapper[4675]: I1125 13:02:13.663012 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:02:13 crc kubenswrapper[4675]: I1125 13:02:13.663066 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:02:13 crc kubenswrapper[4675]: I1125 13:02:13.663869 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" 
containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:02:13 crc kubenswrapper[4675]: I1125 13:02:13.663933 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76" gracePeriod=600 Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.262310 4675 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-675f685b59-np48s" podUID="08133520-c4c6-4b59-b426-d18290e4195a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.740520 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.765409 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76" exitCode=0 Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.765532 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76"} Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.765570 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935"} Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.765601 4675 scope.go:117] "RemoveContainer" containerID="0b8a762acda38992bc86f2f9e0d052ba5995f4eba36cd316341723b588c40e61" Nov 25 13:02:14 crc kubenswrapper[4675]: I1125 13:02:14.832520 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:02:15 crc kubenswrapper[4675]: I1125 13:02:15.526383 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rzcbd" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" probeResult="failure" output=< Nov 25 13:02:15 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:02:15 crc kubenswrapper[4675]: > Nov 25 13:02:15 crc kubenswrapper[4675]: I1125 13:02:15.777732 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tk5lf" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="registry-server" containerID="cri-o://54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82" gracePeriod=2 Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.255541 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.383581 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities\") pod \"186b5298-d646-4785-a460-0bb0e9227cd0\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.384762 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities" (OuterVolumeSpecName: "utilities") pod "186b5298-d646-4785-a460-0bb0e9227cd0" (UID: "186b5298-d646-4785-a460-0bb0e9227cd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.385151 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vckgx\" (UniqueName: \"kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx\") pod \"186b5298-d646-4785-a460-0bb0e9227cd0\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.386582 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content\") pod \"186b5298-d646-4785-a460-0bb0e9227cd0\" (UID: \"186b5298-d646-4785-a460-0bb0e9227cd0\") " Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.387480 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.399025 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx" (OuterVolumeSpecName: "kube-api-access-vckgx") pod "186b5298-d646-4785-a460-0bb0e9227cd0" (UID: "186b5298-d646-4785-a460-0bb0e9227cd0"). InnerVolumeSpecName "kube-api-access-vckgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.459779 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "186b5298-d646-4785-a460-0bb0e9227cd0" (UID: "186b5298-d646-4785-a460-0bb0e9227cd0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.489267 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vckgx\" (UniqueName: \"kubernetes.io/projected/186b5298-d646-4785-a460-0bb0e9227cd0-kube-api-access-vckgx\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.489527 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/186b5298-d646-4785-a460-0bb0e9227cd0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.788604 4675 generic.go:334] "Generic (PLEG): container finished" podID="186b5298-d646-4785-a460-0bb0e9227cd0" containerID="54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82" exitCode=0 Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.788672 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerDied","Data":"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82"} Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.788681 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tk5lf" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.788707 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk5lf" event={"ID":"186b5298-d646-4785-a460-0bb0e9227cd0","Type":"ContainerDied","Data":"763faf8998a0c73eca80602db4e90f824d52bc18580dad6ec2ff881314d4327f"} Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.788747 4675 scope.go:117] "RemoveContainer" containerID="54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.810680 4675 scope.go:117] "RemoveContainer" containerID="8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.837984 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.846040 4675 scope.go:117] "RemoveContainer" containerID="4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.854204 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tk5lf"] Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.901201 4675 scope.go:117] "RemoveContainer" containerID="54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82" Nov 25 13:02:16 crc kubenswrapper[4675]: E1125 13:02:16.902061 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82\": container with ID starting with 54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82 not found: ID does not exist" containerID="54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.902115 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82"} err="failed to get container status 
\"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82\": rpc error: code = NotFound desc = could not find container \"54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82\": container with ID starting with 54d53ae16182c4d2df0b3a130260e6f53af05b672be0b631d453d712ad1b8d82 not found: ID does not exist" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.902140 4675 scope.go:117] "RemoveContainer" containerID="8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26" Nov 25 13:02:16 crc kubenswrapper[4675]: E1125 13:02:16.902590 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26\": container with ID starting with 8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26 not found: ID does not exist" containerID="8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.902631 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26"} err="failed to get container status \"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26\": rpc error: code = NotFound desc = could not find container \"8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26\": container with ID starting with 8762a94aeb2b316bd93ae4fcc71375983f603c7bc490836070901e85a11ebd26 not found: ID does not exist" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.902659 4675 scope.go:117] "RemoveContainer" containerID="4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a" Nov 25 13:02:16 crc kubenswrapper[4675]: E1125 13:02:16.903208 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a\": container with ID starting with 4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a not found: ID does not exist" containerID="4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a" Nov 25 13:02:16 crc kubenswrapper[4675]: I1125 13:02:16.903239 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a"} err="failed to get container status \"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a\": rpc error: code = NotFound desc = could not find container \"4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a\": container with ID starting with 4e2f994a1e6233889a444303739a4d84820741bf6db64aea6d4655647295ee5a not found: ID does not exist" Nov 25 13:02:17 crc kubenswrapper[4675]: I1125 13:02:17.543235 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" path="/var/lib/kubelet/pods/186b5298-d646-4785-a460-0bb0e9227cd0/volumes" Nov 25 13:02:18 crc kubenswrapper[4675]: I1125 13:02:18.807642 4675 generic.go:334] "Generic (PLEG): container finished" podID="b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" containerID="179c14e895f17bb74e148767b2109ad38dfc677802b173cb6a638028b271bf84" exitCode=0 Nov 25 13:02:18 crc kubenswrapper[4675]: I1125 13:02:18.807691 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" 
event={"ID":"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46","Type":"ContainerDied","Data":"179c14e895f17bb74e148767b2109ad38dfc677802b173cb6a638028b271bf84"} Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.285480 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.366887 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.366936 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367016 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367057 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367076 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljp66\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367094 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367127 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367194 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367211 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367239 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367270 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367287 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367309 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.367330 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\" (UID: \"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46\") " Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.376979 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.377066 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.377523 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.378516 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.380263 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.380436 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.380628 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.380645 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66" (OuterVolumeSpecName: "kube-api-access-ljp66") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "kube-api-access-ljp66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.381064 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.383129 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.383251 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.384809 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.404330 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.415127 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory" (OuterVolumeSpecName: "inventory") pod "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" (UID: "b04ef6a2-5500-4d9d-87ad-1ec2762a5a46"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.468986 4675 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469178 4675 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469239 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469320 4675 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469376 4675 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469391 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469405 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469417 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469430 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469441 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469451 4675 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469462 4675 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469471 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljp66\" (UniqueName: \"kubernetes.io/projected/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-kube-api-access-ljp66\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.469480 4675 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ef6a2-5500-4d9d-87ad-1ec2762a5a46-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.826261 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" event={"ID":"b04ef6a2-5500-4d9d-87ad-1ec2762a5a46","Type":"ContainerDied","Data":"1015152e0b6fb16e4b58e5f83bcd4c68786dc4084fc215672d830632723afb08"} Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.826322 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1015152e0b6fb16e4b58e5f83bcd4c68786dc4084fc215672d830632723afb08" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.826315 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-c46tf" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.990799 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr"] Nov 25 13:02:20 crc kubenswrapper[4675]: E1125 13:02:20.991355 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="extract-content" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.991448 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="extract-content" Nov 25 13:02:20 crc kubenswrapper[4675]: E1125 13:02:20.991510 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.991571 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 13:02:20 crc kubenswrapper[4675]: E1125 13:02:20.991635 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="extract-utilities" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.991683 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="extract-utilities" Nov 25 13:02:20 crc kubenswrapper[4675]: E1125 13:02:20.991742 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="registry-server" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.991793 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="registry-server" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.992031 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="b04ef6a2-5500-4d9d-87ad-1ec2762a5a46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 
13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.994187 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="186b5298-d646-4785-a460-0bb0e9227cd0" containerName="registry-server" Nov 25 13:02:20 crc kubenswrapper[4675]: I1125 13:02:20.994881 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.001625 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.001882 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.006680 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.008131 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.008305 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.009417 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr"] Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.081451 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.081785 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.081830 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.081962 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4b9s\" (UniqueName: \"kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.082021 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory\") pod 
\"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.184072 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.184139 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.184171 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.184232 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4b9s\" (UniqueName: \"kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.184258 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.186002 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.188489 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.189448 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 
25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.191994 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.213638 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4b9s\" (UniqueName: \"kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vq5tr\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.318398 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:02:21 crc kubenswrapper[4675]: I1125 13:02:21.882917 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr"] Nov 25 13:02:22 crc kubenswrapper[4675]: I1125 13:02:22.842803 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" event={"ID":"5f3381e9-49d9-47ea-87dd-86442bf3394a","Type":"ContainerStarted","Data":"2e9f4cccd6b6c2219db75f0ec9d2229daf5382345adeb7daa8e4f99547e2d0f9"} Nov 25 13:02:23 crc kubenswrapper[4675]: I1125 13:02:23.852677 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" event={"ID":"5f3381e9-49d9-47ea-87dd-86442bf3394a","Type":"ContainerStarted","Data":"8cba1ba78ddd304634c489a295341cdf09b4ceafd9088bde59df14bb9c3d1fce"} Nov 25 13:02:23 crc kubenswrapper[4675]: I1125 13:02:23.880763 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" podStartSLOduration=2.6560907240000002 podStartE2EDuration="3.880740434s" podCreationTimestamp="2025-11-25 13:02:20 +0000 UTC" firstStartedPulling="2025-11-25 13:02:21.883858691 +0000 UTC m=+2087.055451022" lastFinishedPulling="2025-11-25 13:02:23.108508391 +0000 UTC m=+2088.280100732" observedRunningTime="2025-11-25 13:02:23.868730716 +0000 UTC m=+2089.040323067" watchObservedRunningTime="2025-11-25 13:02:23.880740434 +0000 UTC m=+2089.052332775" Nov 25 13:02:24 crc kubenswrapper[4675]: I1125 13:02:24.544977 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:24 crc kubenswrapper[4675]: I1125 13:02:24.594424 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:25 crc kubenswrapper[4675]: I1125 13:02:25.311511 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:02:25 crc kubenswrapper[4675]: I1125 13:02:25.869143 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rzcbd" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" containerID="cri-o://63c9e8e464cb2684630f7b559f9e4365f6b131306f0da3a2b921b19a720cf406" gracePeriod=2 Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.882894 4675 generic.go:334] "Generic (PLEG): container finished" 
podID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerID="63c9e8e464cb2684630f7b559f9e4365f6b131306f0da3a2b921b19a720cf406" exitCode=0 Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.882971 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerDied","Data":"63c9e8e464cb2684630f7b559f9e4365f6b131306f0da3a2b921b19a720cf406"} Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.883262 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcbd" event={"ID":"e2d84373-af8f-4fb7-a44a-ddcd716fda33","Type":"ContainerDied","Data":"11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668"} Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.883286 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11484eac02e56918478be6d33bb4a6ed53d47b1e7524fc5092e5cee568aaa668" Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.962895 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.996461 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdwx9\" (UniqueName: \"kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9\") pod \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.996534 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content\") pod \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.996656 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities\") pod \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\" (UID: \"e2d84373-af8f-4fb7-a44a-ddcd716fda33\") " Nov 25 13:02:26 crc kubenswrapper[4675]: I1125 13:02:26.997893 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities" (OuterVolumeSpecName: "utilities") pod "e2d84373-af8f-4fb7-a44a-ddcd716fda33" (UID: "e2d84373-af8f-4fb7-a44a-ddcd716fda33"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.015020 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9" (OuterVolumeSpecName: "kube-api-access-fdwx9") pod "e2d84373-af8f-4fb7-a44a-ddcd716fda33" (UID: "e2d84373-af8f-4fb7-a44a-ddcd716fda33"). InnerVolumeSpecName "kube-api-access-fdwx9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.099675 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.099754 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdwx9\" (UniqueName: \"kubernetes.io/projected/e2d84373-af8f-4fb7-a44a-ddcd716fda33-kube-api-access-fdwx9\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.101583 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2d84373-af8f-4fb7-a44a-ddcd716fda33" (UID: "e2d84373-af8f-4fb7-a44a-ddcd716fda33"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.202158 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d84373-af8f-4fb7-a44a-ddcd716fda33-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.891759 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcbd" Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.916424 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:02:27 crc kubenswrapper[4675]: I1125 13:02:27.924218 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rzcbd"] Nov 25 13:02:29 crc kubenswrapper[4675]: I1125 13:02:29.543738 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" path="/var/lib/kubelet/pods/e2d84373-af8f-4fb7-a44a-ddcd716fda33/volumes" Nov 25 13:03:28 crc kubenswrapper[4675]: I1125 13:03:28.417474 4675 generic.go:334] "Generic (PLEG): container finished" podID="5f3381e9-49d9-47ea-87dd-86442bf3394a" containerID="8cba1ba78ddd304634c489a295341cdf09b4ceafd9088bde59df14bb9c3d1fce" exitCode=0 Nov 25 13:03:28 crc kubenswrapper[4675]: I1125 13:03:28.417574 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" event={"ID":"5f3381e9-49d9-47ea-87dd-86442bf3394a","Type":"ContainerDied","Data":"8cba1ba78ddd304634c489a295341cdf09b4ceafd9088bde59df14bb9c3d1fce"} Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.810725 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.857316 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle\") pod \"5f3381e9-49d9-47ea-87dd-86442bf3394a\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.857417 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key\") pod \"5f3381e9-49d9-47ea-87dd-86442bf3394a\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.857513 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0\") pod \"5f3381e9-49d9-47ea-87dd-86442bf3394a\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.857927 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory\") pod \"5f3381e9-49d9-47ea-87dd-86442bf3394a\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.857996 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4b9s\" (UniqueName: \"kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s\") pod \"5f3381e9-49d9-47ea-87dd-86442bf3394a\" (UID: \"5f3381e9-49d9-47ea-87dd-86442bf3394a\") " Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.863344 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s" (OuterVolumeSpecName: "kube-api-access-f4b9s") pod "5f3381e9-49d9-47ea-87dd-86442bf3394a" (UID: "5f3381e9-49d9-47ea-87dd-86442bf3394a"). InnerVolumeSpecName "kube-api-access-f4b9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.864098 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "5f3381e9-49d9-47ea-87dd-86442bf3394a" (UID: "5f3381e9-49d9-47ea-87dd-86442bf3394a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.884116 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "5f3381e9-49d9-47ea-87dd-86442bf3394a" (UID: "5f3381e9-49d9-47ea-87dd-86442bf3394a"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.890355 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f3381e9-49d9-47ea-87dd-86442bf3394a" (UID: "5f3381e9-49d9-47ea-87dd-86442bf3394a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.908136 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory" (OuterVolumeSpecName: "inventory") pod "5f3381e9-49d9-47ea-87dd-86442bf3394a" (UID: "5f3381e9-49d9-47ea-87dd-86442bf3394a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.960424 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.960465 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4b9s\" (UniqueName: \"kubernetes.io/projected/5f3381e9-49d9-47ea-87dd-86442bf3394a-kube-api-access-f4b9s\") on node \"crc\" DevicePath \"\"" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.960480 4675 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.960490 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f3381e9-49d9-47ea-87dd-86442bf3394a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:03:29 crc kubenswrapper[4675]: I1125 13:03:29.960504 4675 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5f3381e9-49d9-47ea-87dd-86442bf3394a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.444212 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" event={"ID":"5f3381e9-49d9-47ea-87dd-86442bf3394a","Type":"ContainerDied","Data":"2e9f4cccd6b6c2219db75f0ec9d2229daf5382345adeb7daa8e4f99547e2d0f9"} Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.444258 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e9f4cccd6b6c2219db75f0ec9d2229daf5382345adeb7daa8e4f99547e2d0f9" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.444320 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vq5tr" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.528055 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg"] Nov 25 13:03:30 crc kubenswrapper[4675]: E1125 13:03:30.528630 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="extract-utilities" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.528732 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="extract-utilities" Nov 25 13:03:30 crc kubenswrapper[4675]: E1125 13:03:30.528810 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="extract-content" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.528899 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="extract-content" Nov 25 13:03:30 crc kubenswrapper[4675]: E1125 13:03:30.528977 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.529027 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" Nov 25 13:03:30 crc kubenswrapper[4675]: E1125 13:03:30.529084 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3381e9-49d9-47ea-87dd-86442bf3394a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.529133 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3381e9-49d9-47ea-87dd-86442bf3394a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.529362 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f3381e9-49d9-47ea-87dd-86442bf3394a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.529526 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2d84373-af8f-4fb7-a44a-ddcd716fda33" containerName="registry-server" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.530182 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.532391 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.532391 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.532465 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.532762 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.532865 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.533130 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.597917 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg"] Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691188 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691360 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691398 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj7xz\" (UniqueName: \"kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691449 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691901 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.691989 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.793928 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.794030 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.794123 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.794248 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.795083 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.795149 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj7xz\" (UniqueName: \"kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 
13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.799440 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.799588 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.800408 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.806904 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.807717 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.816758 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj7xz\" (UniqueName: \"kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:30 crc kubenswrapper[4675]: I1125 13:03:30.894539 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:03:31 crc kubenswrapper[4675]: I1125 13:03:31.411676 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg"] Nov 25 13:03:31 crc kubenswrapper[4675]: I1125 13:03:31.452738 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" event={"ID":"4ee951ab-e497-4274-9251-85c92c498b0e","Type":"ContainerStarted","Data":"b79078055c1d0a9f48719133ad85c39307f73bfc267bf43889618f5bce02f77c"} Nov 25 13:03:33 crc kubenswrapper[4675]: I1125 13:03:33.473046 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" event={"ID":"4ee951ab-e497-4274-9251-85c92c498b0e","Type":"ContainerStarted","Data":"8a20bd87651a86d04130100cf704a9664df8b7081ab4cb5d3c517b317daa738c"} Nov 25 13:03:33 crc kubenswrapper[4675]: I1125 13:03:33.492693 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" podStartSLOduration=2.602308349 podStartE2EDuration="3.492675882s" podCreationTimestamp="2025-11-25 13:03:30 +0000 UTC" firstStartedPulling="2025-11-25 13:03:31.420176162 +0000 UTC m=+2156.591768503" lastFinishedPulling="2025-11-25 13:03:32.310543685 +0000 UTC m=+2157.482136036" observedRunningTime="2025-11-25 13:03:33.489362898 +0000 UTC m=+2158.660955239" watchObservedRunningTime="2025-11-25 13:03:33.492675882 +0000 UTC m=+2158.664268213" Nov 25 13:04:19 crc kubenswrapper[4675]: I1125 13:04:19.850472 4675 generic.go:334] "Generic (PLEG): container finished" podID="4ee951ab-e497-4274-9251-85c92c498b0e" containerID="8a20bd87651a86d04130100cf704a9664df8b7081ab4cb5d3c517b317daa738c" exitCode=0 Nov 25 13:04:19 crc kubenswrapper[4675]: I1125 13:04:19.850558 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" event={"ID":"4ee951ab-e497-4274-9251-85c92c498b0e","Type":"ContainerDied","Data":"8a20bd87651a86d04130100cf704a9664df8b7081ab4cb5d3c517b317daa738c"} Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.272317 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.468962 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.469053 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sj7xz\" (UniqueName: \"kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.469134 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.469179 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.469273 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.469346 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"4ee951ab-e497-4274-9251-85c92c498b0e\" (UID: \"4ee951ab-e497-4274-9251-85c92c498b0e\") " Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.474919 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.478459 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz" (OuterVolumeSpecName: "kube-api-access-sj7xz") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "kube-api-access-sj7xz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.497561 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.498154 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.500830 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.503200 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory" (OuterVolumeSpecName: "inventory") pod "4ee951ab-e497-4274-9251-85c92c498b0e" (UID: "4ee951ab-e497-4274-9251-85c92c498b0e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.571935 4675 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.571989 4675 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.572009 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sj7xz\" (UniqueName: \"kubernetes.io/projected/4ee951ab-e497-4274-9251-85c92c498b0e-kube-api-access-sj7xz\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.572026 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.572041 4675 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.572059 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4ee951ab-e497-4274-9251-85c92c498b0e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.870996 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" event={"ID":"4ee951ab-e497-4274-9251-85c92c498b0e","Type":"ContainerDied","Data":"b79078055c1d0a9f48719133ad85c39307f73bfc267bf43889618f5bce02f77c"} Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.871038 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b79078055c1d0a9f48719133ad85c39307f73bfc267bf43889618f5bce02f77c" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.871053 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.979702 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr"] Nov 25 13:04:21 crc kubenswrapper[4675]: E1125 13:04:21.980164 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ee951ab-e497-4274-9251-85c92c498b0e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.980182 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ee951ab-e497-4274-9251-85c92c498b0e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.980402 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ee951ab-e497-4274-9251-85c92c498b0e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.982899 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.997596 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.997792 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.997914 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:04:21 crc kubenswrapper[4675]: I1125 13:04:21.999717 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:21.999982 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.023177 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr"] Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.188711 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfshs\" (UniqueName: \"kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.189082 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.189203 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.189239 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.189309 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.291270 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-bfshs\" (UniqueName: \"kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.291326 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.291405 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.291424 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.291470 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.296004 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.296293 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.297604 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.304595 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.309521 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfshs\" (UniqueName: \"kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:22 crc kubenswrapper[4675]: I1125 13:04:22.603964 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:04:23 crc kubenswrapper[4675]: I1125 13:04:23.157746 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr"] Nov 25 13:04:23 crc kubenswrapper[4675]: I1125 13:04:23.888619 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" event={"ID":"c466bd75-6cb4-452f-a4fa-d9a5dbec6840","Type":"ContainerStarted","Data":"3102712ed16c3a4799fb57c465a73fd770311a1c2326fe8240d734a2741e0c5f"} Nov 25 13:04:23 crc kubenswrapper[4675]: I1125 13:04:23.888958 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" event={"ID":"c466bd75-6cb4-452f-a4fa-d9a5dbec6840","Type":"ContainerStarted","Data":"9915b88db5cc2e151fa9c3b36005f8bcc1dba14de43feb609ebeb4c69fd1818b"} Nov 25 13:04:23 crc kubenswrapper[4675]: I1125 13:04:23.921133 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" podStartSLOduration=2.488834677 podStartE2EDuration="2.92111608s" podCreationTimestamp="2025-11-25 13:04:21 +0000 UTC" firstStartedPulling="2025-11-25 13:04:23.147127644 +0000 UTC m=+2208.318719985" lastFinishedPulling="2025-11-25 13:04:23.579409047 +0000 UTC m=+2208.751001388" observedRunningTime="2025-11-25 13:04:23.90304731 +0000 UTC m=+2209.074639651" watchObservedRunningTime="2025-11-25 13:04:23.92111608 +0000 UTC m=+2209.092708421" Nov 25 13:04:43 crc kubenswrapper[4675]: I1125 13:04:43.662602 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:04:43 crc kubenswrapper[4675]: I1125 13:04:43.663205 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:05:13 crc kubenswrapper[4675]: I1125 13:05:13.662111 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:05:13 crc kubenswrapper[4675]: I1125 13:05:13.662735 4675 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:05:43 crc kubenswrapper[4675]: I1125 13:05:43.662315 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:05:43 crc kubenswrapper[4675]: I1125 13:05:43.662890 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:05:43 crc kubenswrapper[4675]: I1125 13:05:43.662939 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:05:43 crc kubenswrapper[4675]: I1125 13:05:43.663660 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:05:43 crc kubenswrapper[4675]: I1125 13:05:43.663722 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" gracePeriod=600 Nov 25 13:05:44 crc kubenswrapper[4675]: E1125 13:05:44.040005 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:05:44 crc kubenswrapper[4675]: I1125 13:05:44.620317 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" exitCode=0 Nov 25 13:05:44 crc kubenswrapper[4675]: I1125 13:05:44.620366 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935"} Nov 25 13:05:44 crc kubenswrapper[4675]: I1125 13:05:44.620402 4675 scope.go:117] "RemoveContainer" containerID="77326986e63dd4ee6e1e015549b847b50974a5865176ff1042fefaba40517e76" Nov 25 13:05:44 crc kubenswrapper[4675]: I1125 13:05:44.621078 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:05:44 crc kubenswrapper[4675]: E1125 
13:05:44.621448 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:05:56 crc kubenswrapper[4675]: I1125 13:05:56.532990 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:05:56 crc kubenswrapper[4675]: E1125 13:05:56.533653 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:06:11 crc kubenswrapper[4675]: I1125 13:06:11.532449 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:06:11 crc kubenswrapper[4675]: E1125 13:06:11.533166 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:06:26 crc kubenswrapper[4675]: I1125 13:06:26.535722 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:06:26 crc kubenswrapper[4675]: E1125 13:06:26.536587 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:06:38 crc kubenswrapper[4675]: I1125 13:06:38.532679 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:06:38 crc kubenswrapper[4675]: E1125 13:06:38.533521 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:06:53 crc kubenswrapper[4675]: I1125 13:06:53.532328 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:06:53 crc kubenswrapper[4675]: E1125 13:06:53.537355 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:07:08 crc kubenswrapper[4675]: I1125 13:07:08.532515 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:07:08 crc kubenswrapper[4675]: E1125 13:07:08.533276 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:07:21 crc kubenswrapper[4675]: I1125 13:07:21.533496 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:07:21 crc kubenswrapper[4675]: E1125 13:07:21.534480 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:07:34 crc kubenswrapper[4675]: I1125 13:07:34.532583 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:07:34 crc kubenswrapper[4675]: E1125 13:07:34.533241 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:07:47 crc kubenswrapper[4675]: I1125 13:07:47.533070 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:07:47 crc kubenswrapper[4675]: E1125 13:07:47.533880 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:07:58 crc kubenswrapper[4675]: I1125 13:07:58.532638 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:07:58 crc kubenswrapper[4675]: E1125 13:07:58.533516 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:08:12 crc kubenswrapper[4675]: I1125 13:08:12.532857 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:08:12 crc kubenswrapper[4675]: E1125 13:08:12.533593 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:08:24 crc kubenswrapper[4675]: I1125 13:08:24.533192 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:08:24 crc kubenswrapper[4675]: E1125 13:08:24.534480 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:08:38 crc kubenswrapper[4675]: I1125 13:08:38.533582 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:08:38 crc kubenswrapper[4675]: E1125 13:08:38.534453 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:08:47 crc kubenswrapper[4675]: I1125 13:08:47.101900 4675 scope.go:117] "RemoveContainer" containerID="23b79ce6eba89c02befc098d3eec0957a09f1347f7624e639a2f791f5944d2e9" Nov 25 13:08:47 crc kubenswrapper[4675]: I1125 13:08:47.124746 4675 scope.go:117] "RemoveContainer" containerID="63c9e8e464cb2684630f7b559f9e4365f6b131306f0da3a2b921b19a720cf406" Nov 25 13:08:47 crc kubenswrapper[4675]: I1125 13:08:47.174629 4675 scope.go:117] "RemoveContainer" containerID="73ea3eb665b2b2af5941acbae075aee7ae6b7ccf5d9c97ad0c5e3cd3f3538127" Nov 25 13:08:53 crc kubenswrapper[4675]: I1125 13:08:53.532448 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:08:53 crc kubenswrapper[4675]: E1125 13:08:53.533199 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:09:00 crc kubenswrapper[4675]: I1125 13:09:00.363403 4675 generic.go:334] "Generic (PLEG): container finished" podID="c466bd75-6cb4-452f-a4fa-d9a5dbec6840" 
containerID="3102712ed16c3a4799fb57c465a73fd770311a1c2326fe8240d734a2741e0c5f" exitCode=0 Nov 25 13:09:00 crc kubenswrapper[4675]: I1125 13:09:00.363461 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" event={"ID":"c466bd75-6cb4-452f-a4fa-d9a5dbec6840","Type":"ContainerDied","Data":"3102712ed16c3a4799fb57c465a73fd770311a1c2326fe8240d734a2741e0c5f"} Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.827299 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.937060 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0\") pod \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.937113 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key\") pod \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.937208 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory\") pod \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.937247 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfshs\" (UniqueName: \"kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs\") pod \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.937301 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle\") pod \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\" (UID: \"c466bd75-6cb4-452f-a4fa-d9a5dbec6840\") " Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.950239 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "c466bd75-6cb4-452f-a4fa-d9a5dbec6840" (UID: "c466bd75-6cb4-452f-a4fa-d9a5dbec6840"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.950320 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs" (OuterVolumeSpecName: "kube-api-access-bfshs") pod "c466bd75-6cb4-452f-a4fa-d9a5dbec6840" (UID: "c466bd75-6cb4-452f-a4fa-d9a5dbec6840"). InnerVolumeSpecName "kube-api-access-bfshs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.971807 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory" (OuterVolumeSpecName: "inventory") pod "c466bd75-6cb4-452f-a4fa-d9a5dbec6840" (UID: "c466bd75-6cb4-452f-a4fa-d9a5dbec6840"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.972464 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c466bd75-6cb4-452f-a4fa-d9a5dbec6840" (UID: "c466bd75-6cb4-452f-a4fa-d9a5dbec6840"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:09:01 crc kubenswrapper[4675]: I1125 13:09:01.976241 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "c466bd75-6cb4-452f-a4fa-d9a5dbec6840" (UID: "c466bd75-6cb4-452f-a4fa-d9a5dbec6840"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.060975 4675 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.061009 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.061022 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.061034 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfshs\" (UniqueName: \"kubernetes.io/projected/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-kube-api-access-bfshs\") on node \"crc\" DevicePath \"\"" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.061062 4675 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c466bd75-6cb4-452f-a4fa-d9a5dbec6840-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.404266 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" event={"ID":"c466bd75-6cb4-452f-a4fa-d9a5dbec6840","Type":"ContainerDied","Data":"9915b88db5cc2e151fa9c3b36005f8bcc1dba14de43feb609ebeb4c69fd1818b"} Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.404722 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9915b88db5cc2e151fa9c3b36005f8bcc1dba14de43feb609ebeb4c69fd1818b" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.404307 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.557468 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l"] Nov 25 13:09:02 crc kubenswrapper[4675]: E1125 13:09:02.558222 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c466bd75-6cb4-452f-a4fa-d9a5dbec6840" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.558997 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="c466bd75-6cb4-452f-a4fa-d9a5dbec6840" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.559348 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="c466bd75-6cb4-452f-a4fa-d9a5dbec6840" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.561101 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.564580 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.565727 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.565980 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.569246 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.571470 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.571741 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.573766 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.586723 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l"] Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.673248 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.673747 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.674012 4675 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.676845 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.677154 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.677283 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.677486 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.677681 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.677852 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gznkp\" (UniqueName: \"kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.779669 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 
13:09:02.780023 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780189 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780341 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780434 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780521 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780662 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780789 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.780932 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gznkp\" (UniqueName: \"kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.783481 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.786277 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.787016 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.787640 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.787646 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.787858 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.791541 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.793301 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.804672 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gznkp\" (UniqueName: 
\"kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-l9q4l\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:02 crc kubenswrapper[4675]: I1125 13:09:02.881352 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" Nov 25 13:09:03 crc kubenswrapper[4675]: I1125 13:09:03.403475 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l"] Nov 25 13:09:03 crc kubenswrapper[4675]: I1125 13:09:03.414287 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 13:09:04 crc kubenswrapper[4675]: I1125 13:09:04.430698 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" event={"ID":"4c8ba7b5-22cd-44f4-9389-1f352f9a2368","Type":"ContainerStarted","Data":"8a94102389ef073f0da3756de990eb05638a1ea67bae64345ecd90cd3166b338"} Nov 25 13:09:04 crc kubenswrapper[4675]: I1125 13:09:04.431879 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" event={"ID":"4c8ba7b5-22cd-44f4-9389-1f352f9a2368","Type":"ContainerStarted","Data":"eab818d619c07852097addde503975a30b3454c9ff1e4ecb689250a357ddfcc8"} Nov 25 13:09:04 crc kubenswrapper[4675]: I1125 13:09:04.455897 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" podStartSLOduration=1.938061435 podStartE2EDuration="2.455870723s" podCreationTimestamp="2025-11-25 13:09:02 +0000 UTC" firstStartedPulling="2025-11-25 13:09:03.413880764 +0000 UTC m=+2488.585473105" lastFinishedPulling="2025-11-25 13:09:03.931690052 +0000 UTC m=+2489.103282393" observedRunningTime="2025-11-25 13:09:04.448135997 +0000 UTC m=+2489.619728338" watchObservedRunningTime="2025-11-25 13:09:04.455870723 +0000 UTC m=+2489.627463084" Nov 25 13:09:07 crc kubenswrapper[4675]: I1125 13:09:07.533367 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:09:07 crc kubenswrapper[4675]: E1125 13:09:07.534150 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:09:19 crc kubenswrapper[4675]: I1125 13:09:19.532745 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:09:19 crc kubenswrapper[4675]: E1125 13:09:19.533515 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:09:34 crc kubenswrapper[4675]: I1125 13:09:34.545121 4675 scope.go:117] 
"RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:09:34 crc kubenswrapper[4675]: E1125 13:09:34.546749 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:09:47 crc kubenswrapper[4675]: I1125 13:09:47.533186 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:09:47 crc kubenswrapper[4675]: E1125 13:09:47.535386 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:10:01 crc kubenswrapper[4675]: I1125 13:10:01.533513 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:10:01 crc kubenswrapper[4675]: E1125 13:10:01.534539 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:10:14 crc kubenswrapper[4675]: I1125 13:10:14.532506 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:10:14 crc kubenswrapper[4675]: E1125 13:10:14.533215 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:10:29 crc kubenswrapper[4675]: I1125 13:10:29.532558 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:10:29 crc kubenswrapper[4675]: E1125 13:10:29.534089 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:10:43 crc kubenswrapper[4675]: I1125 13:10:43.532684 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:10:43 crc kubenswrapper[4675]: E1125 13:10:43.533640 4675 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:10:54 crc kubenswrapper[4675]: I1125 13:10:54.532776 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:10:55 crc kubenswrapper[4675]: I1125 13:10:55.440995 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675"} Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.599019 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"] Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.602744 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.648907 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"] Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.674379 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.674943 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.675551 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7bt5\" (UniqueName: \"kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.777848 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7bt5\" (UniqueName: \"kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.778312 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 
13:11:05.778348 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.778808 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.778976 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.808581 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7bt5\" (UniqueName: \"kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5\") pod \"certified-operators-w7g4z\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") " pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:05 crc kubenswrapper[4675]: I1125 13:11:05.955473 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7g4z" Nov 25 13:11:06 crc kubenswrapper[4675]: I1125 13:11:06.698274 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"] Nov 25 13:11:07 crc kubenswrapper[4675]: I1125 13:11:07.533940 4675 generic.go:334] "Generic (PLEG): container finished" podID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerID="db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b" exitCode=0 Nov 25 13:11:07 crc kubenswrapper[4675]: I1125 13:11:07.543091 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerDied","Data":"db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b"} Nov 25 13:11:07 crc kubenswrapper[4675]: I1125 13:11:07.543131 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerStarted","Data":"837a23ee0e612d2cbf5c0d2c62ce8dcd1809ad21d5a3d6170d916e55ae94c325"} Nov 25 13:11:08 crc kubenswrapper[4675]: I1125 13:11:08.543729 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerStarted","Data":"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"} Nov 25 13:11:09 crc kubenswrapper[4675]: I1125 13:11:09.555982 4675 generic.go:334] "Generic (PLEG): container finished" podID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerID="fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d" exitCode=0 Nov 25 13:11:09 crc kubenswrapper[4675]: I1125 13:11:09.556078 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" 
event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerDied","Data":"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"}
Nov 25 13:11:10 crc kubenswrapper[4675]: I1125 13:11:10.568145 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerStarted","Data":"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"}
Nov 25 13:11:15 crc kubenswrapper[4675]: I1125 13:11:15.956684 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:15 crc kubenswrapper[4675]: I1125 13:11:15.957034 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:16 crc kubenswrapper[4675]: I1125 13:11:16.005548 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:16 crc kubenswrapper[4675]: I1125 13:11:16.026429 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w7g4z" podStartSLOduration=8.319240235 podStartE2EDuration="11.026411021s" podCreationTimestamp="2025-11-25 13:11:05 +0000 UTC" firstStartedPulling="2025-11-25 13:11:07.536030662 +0000 UTC m=+2612.707623013" lastFinishedPulling="2025-11-25 13:11:10.243201458 +0000 UTC m=+2615.414793799" observedRunningTime="2025-11-25 13:11:10.595241504 +0000 UTC m=+2615.766833845" watchObservedRunningTime="2025-11-25 13:11:16.026411021 +0000 UTC m=+2621.198003362"
Nov 25 13:11:16 crc kubenswrapper[4675]: I1125 13:11:16.691648 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:16 crc kubenswrapper[4675]: I1125 13:11:16.751187 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"]
Nov 25 13:11:18 crc kubenswrapper[4675]: I1125 13:11:18.644537 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w7g4z" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="registry-server" containerID="cri-o://f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154" gracePeriod=2
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.113129 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.248058 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7bt5\" (UniqueName: \"kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5\") pod \"2389699a-3ec5-4a7a-88d1-d08154807f22\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") "
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.249014 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities\") pod \"2389699a-3ec5-4a7a-88d1-d08154807f22\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") "
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.249064 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content\") pod \"2389699a-3ec5-4a7a-88d1-d08154807f22\" (UID: \"2389699a-3ec5-4a7a-88d1-d08154807f22\") "
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.249979 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities" (OuterVolumeSpecName: "utilities") pod "2389699a-3ec5-4a7a-88d1-d08154807f22" (UID: "2389699a-3ec5-4a7a-88d1-d08154807f22"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.257646 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5" (OuterVolumeSpecName: "kube-api-access-v7bt5") pod "2389699a-3ec5-4a7a-88d1-d08154807f22" (UID: "2389699a-3ec5-4a7a-88d1-d08154807f22"). InnerVolumeSpecName "kube-api-access-v7bt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.298271 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2389699a-3ec5-4a7a-88d1-d08154807f22" (UID: "2389699a-3ec5-4a7a-88d1-d08154807f22"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.351346 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.351384 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2389699a-3ec5-4a7a-88d1-d08154807f22-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.351399 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7bt5\" (UniqueName: \"kubernetes.io/projected/2389699a-3ec5-4a7a-88d1-d08154807f22-kube-api-access-v7bt5\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.657614 4675 generic.go:334] "Generic (PLEG): container finished" podID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerID="f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154" exitCode=0
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.657658 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerDied","Data":"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"}
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.657684 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7g4z"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.657703 4675 scope.go:117] "RemoveContainer" containerID="f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.657693 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7g4z" event={"ID":"2389699a-3ec5-4a7a-88d1-d08154807f22","Type":"ContainerDied","Data":"837a23ee0e612d2cbf5c0d2c62ce8dcd1809ad21d5a3d6170d916e55ae94c325"}
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.689091 4675 scope.go:117] "RemoveContainer" containerID="fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.691823 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"]
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.702612 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w7g4z"]
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.711351 4675 scope.go:117] "RemoveContainer" containerID="db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.744793 4675 scope.go:117] "RemoveContainer" containerID="f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"
Nov 25 13:11:19 crc kubenswrapper[4675]: E1125 13:11:19.745237 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154\": container with ID starting with f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154 not found: ID does not exist" containerID="f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.745280 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154"} err="failed to get container status \"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154\": rpc error: code = NotFound desc = could not find container \"f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154\": container with ID starting with f832956fd4666bae28382ec9a4a1cc8e08d1f840b6f761a17f98f5e8b42ba154 not found: ID does not exist"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.745313 4675 scope.go:117] "RemoveContainer" containerID="fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"
Nov 25 13:11:19 crc kubenswrapper[4675]: E1125 13:11:19.745659 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d\": container with ID starting with fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d not found: ID does not exist" containerID="fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.745704 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d"} err="failed to get container status \"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d\": rpc error: code = NotFound desc = could not find container \"fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d\": container with ID starting with fdc1289466d8023f455538c9e932137576f04975aa05d9a53c9da4e5cec3445d not found: ID does not exist"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.745758 4675 scope.go:117] "RemoveContainer" containerID="db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b"
Nov 25 13:11:19 crc kubenswrapper[4675]: E1125 13:11:19.746230 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b\": container with ID starting with db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b not found: ID does not exist" containerID="db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b"
Nov 25 13:11:19 crc kubenswrapper[4675]: I1125 13:11:19.746257 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b"} err="failed to get container status \"db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b\": rpc error: code = NotFound desc = could not find container \"db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b\": container with ID starting with db75f80d09a1f2257bad9baf9d674f2109e52b652bc272a47c2207e47274b26b not found: ID does not exist"
Nov 25 13:11:21 crc kubenswrapper[4675]: I1125 13:11:21.545503 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" path="/var/lib/kubelet/pods/2389699a-3ec5-4a7a-88d1-d08154807f22/volumes"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.053519 4675 generic.go:334] "Generic (PLEG): container finished" podID="4c8ba7b5-22cd-44f4-9389-1f352f9a2368" containerID="8a94102389ef073f0da3756de990eb05638a1ea67bae64345ecd90cd3166b338" exitCode=0
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.053597 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" event={"ID":"4c8ba7b5-22cd-44f4-9389-1f352f9a2368","Type":"ContainerDied","Data":"8a94102389ef073f0da3756de990eb05638a1ea67bae64345ecd90cd3166b338"}
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.983093 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mbhkw"]
Nov 25 13:11:49 crc kubenswrapper[4675]: E1125 13:11:49.984090 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="extract-content"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.984108 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="extract-content"
Nov 25 13:11:49 crc kubenswrapper[4675]: E1125 13:11:49.984125 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="extract-utilities"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.984133 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="extract-utilities"
Nov 25 13:11:49 crc kubenswrapper[4675]: E1125 13:11:49.984162 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="registry-server"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.984171 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="registry-server"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.984527 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2389699a-3ec5-4a7a-88d1-d08154807f22" containerName="registry-server"
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.996589 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbhkw"]
Nov 25 13:11:49 crc kubenswrapper[4675]: I1125 13:11:49.996762 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.120913 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbfvd\" (UniqueName: \"kubernetes.io/projected/ed22e285-c4c2-403a-ace9-37402c049fae-kube-api-access-jbfvd\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.120984 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-utilities\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.121073 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-catalog-content\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.237583 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbfvd\" (UniqueName: \"kubernetes.io/projected/ed22e285-c4c2-403a-ace9-37402c049fae-kube-api-access-jbfvd\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.237707 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-utilities\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.238539 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-utilities\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.238843 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-catalog-content\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.240904 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed22e285-c4c2-403a-ace9-37402c049fae-catalog-content\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.281895 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbfvd\" (UniqueName: \"kubernetes.io/projected/ed22e285-c4c2-403a-ace9-37402c049fae-kube-api-access-jbfvd\") pod \"redhat-marketplace-mbhkw\" (UID: \"ed22e285-c4c2-403a-ace9-37402c049fae\") " pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.340216 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.598905 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l"
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.648896 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.648952 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649095 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649158 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649220 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gznkp\" (UniqueName: \"kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649248 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649288 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649320 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.649350 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory\") pod \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\" (UID: \"4c8ba7b5-22cd-44f4-9389-1f352f9a2368\") "
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.665188 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp" (OuterVolumeSpecName: "kube-api-access-gznkp") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "kube-api-access-gznkp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.666763 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.688672 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.692520 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory" (OuterVolumeSpecName: "inventory") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.704123 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.705369 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.720227 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.728694 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.738982 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "4c8ba7b5-22cd-44f4-9389-1f352f9a2368" (UID: "4c8ba7b5-22cd-44f4-9389-1f352f9a2368"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751199 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gznkp\" (UniqueName: \"kubernetes.io/projected/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-kube-api-access-gznkp\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751233 4675 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751245 4675 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751257 4675 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751269 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751280 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751292 4675 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751303 4675 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.751313 4675 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8ba7b5-22cd-44f4-9389-1f352f9a2368-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 13:11:50 crc kubenswrapper[4675]: I1125 13:11:50.941625 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbhkw"]
Nov 25 13:11:50 crc kubenswrapper[4675]: W1125 13:11:50.950734 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded22e285_c4c2_403a_ace9_37402c049fae.slice/crio-ac85e942a1480ce9be77413b2fcfc4ffdad5059966ef1ac02fbd0246af605865 WatchSource:0}: Error finding container ac85e942a1480ce9be77413b2fcfc4ffdad5059966ef1ac02fbd0246af605865: Status 404 returned error can't find the container with id ac85e942a1480ce9be77413b2fcfc4ffdad5059966ef1ac02fbd0246af605865
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.072034 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbhkw" event={"ID":"ed22e285-c4c2-403a-ace9-37402c049fae","Type":"ContainerStarted","Data":"ac85e942a1480ce9be77413b2fcfc4ffdad5059966ef1ac02fbd0246af605865"}
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.074637 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l" event={"ID":"4c8ba7b5-22cd-44f4-9389-1f352f9a2368","Type":"ContainerDied","Data":"eab818d619c07852097addde503975a30b3454c9ff1e4ecb689250a357ddfcc8"}
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.074673 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eab818d619c07852097addde503975a30b3454c9ff1e4ecb689250a357ddfcc8"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.074734 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-l9q4l"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.192297 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"]
Nov 25 13:11:51 crc kubenswrapper[4675]: E1125 13:11:51.192790 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c8ba7b5-22cd-44f4-9389-1f352f9a2368" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.192808 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c8ba7b5-22cd-44f4-9389-1f352f9a2368" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.193146 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c8ba7b5-22cd-44f4-9389-1f352f9a2368" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.194683 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.200399 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.200515 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.200548 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.201011 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7p756"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.204703 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.204758 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"]
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386278 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386322 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386340 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386408 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj67c\" (UniqueName: \"kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386486 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386552 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.386580 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.487784 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj67c\" (UniqueName: \"kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.487864 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.487912 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.487932 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.487978 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.488017 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.488036 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.493624 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.494527 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.494584 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.494703 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.494908 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.507688 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj67c\" (UniqueName: \"kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.509491 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:51 crc kubenswrapper[4675]: I1125 13:11:51.530471 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"
Nov 25 13:11:52 crc kubenswrapper[4675]: I1125 13:11:52.078564 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj"]
Nov 25 13:11:52 crc kubenswrapper[4675]: I1125 13:11:52.089158 4675 generic.go:334] "Generic (PLEG): container finished" podID="ed22e285-c4c2-403a-ace9-37402c049fae" containerID="163c38a6750339d7b5e2315361ababc15f214cc5a1c6ebb900df03088c936aa4" exitCode=0
Nov 25 13:11:52 crc kubenswrapper[4675]: I1125 13:11:52.089201 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbhkw" event={"ID":"ed22e285-c4c2-403a-ace9-37402c049fae","Type":"ContainerDied","Data":"163c38a6750339d7b5e2315361ababc15f214cc5a1c6ebb900df03088c936aa4"}
Nov 25 13:11:53 crc kubenswrapper[4675]: I1125 13:11:53.100987 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" event={"ID":"3ca0e52a-979d-4834-a03d-135355de72db","Type":"ContainerStarted","Data":"c7d72baad23c01a84111487a87dc5505a6029e2fe1e0eaa5e2862fc9031bff6e"}
Nov 25 13:11:53 crc kubenswrapper[4675]: I1125 13:11:53.101342 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" event={"ID":"3ca0e52a-979d-4834-a03d-135355de72db","Type":"ContainerStarted","Data":"c0d861b379cc2bd7fd0801c3c95bd6af548b3b19617bd8be1488de846ee3683b"}
Nov 25 13:11:53 crc kubenswrapper[4675]: I1125 13:11:53.120902 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" podStartSLOduration=1.4486725630000001 podStartE2EDuration="2.120800579s" podCreationTimestamp="2025-11-25 13:11:51 +0000 UTC" firstStartedPulling="2025-11-25 13:11:52.099197979 +0000 UTC m=+2657.270790320" lastFinishedPulling="2025-11-25 13:11:52.771325995 +0000 UTC m=+2657.942918336" observedRunningTime="2025-11-25 13:11:53.117259707 +0000 UTC m=+2658.288852058" watchObservedRunningTime="2025-11-25 13:11:53.120800579 +0000 UTC m=+2658.292392920"
Nov 25 13:11:56 crc kubenswrapper[4675]: I1125 13:11:56.135111 4675 generic.go:334] "Generic (PLEG): container finished" podID="ed22e285-c4c2-403a-ace9-37402c049fae" containerID="6f071210ce9c1dca1babcd0607a8e297eaa183d59ef708fb28ee1e17b21d2008" exitCode=0
Nov 25 13:11:56 crc kubenswrapper[4675]: I1125 13:11:56.135211 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbhkw" event={"ID":"ed22e285-c4c2-403a-ace9-37402c049fae","Type":"ContainerDied","Data":"6f071210ce9c1dca1babcd0607a8e297eaa183d59ef708fb28ee1e17b21d2008"}
Nov 25 13:11:57 crc kubenswrapper[4675]: I1125 13:11:57.147999 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbhkw" event={"ID":"ed22e285-c4c2-403a-ace9-37402c049fae","Type":"ContainerStarted","Data":"5db6594a84982e3e49b867fe09203623fa2dbd05a249e18d24219ce8e27f35ee"}
Nov 25 13:11:57 crc kubenswrapper[4675]: I1125 13:11:57.181016 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mbhkw" podStartSLOduration=3.742343794 podStartE2EDuration="8.180994535s" podCreationTimestamp="2025-11-25 13:11:49 +0000 UTC" firstStartedPulling="2025-11-25 13:11:52.092223858 +0000 UTC m=+2657.263816199" lastFinishedPulling="2025-11-25 13:11:56.530874609 +0000 UTC m=+2661.702466940" observedRunningTime="2025-11-25 13:11:57.169953344 +0000 UTC m=+2662.341545685" watchObservedRunningTime="2025-11-25 13:11:57.180994535 +0000 UTC m=+2662.352586876"
Nov 25 13:12:00 crc kubenswrapper[4675]: I1125 13:12:00.341438 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:12:00 crc kubenswrapper[4675]: I1125 13:12:00.341701 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:12:00 crc kubenswrapper[4675]: I1125 13:12:00.389501 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:12:01 crc kubenswrapper[4675]: I1125 13:12:01.271303 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mbhkw"
Nov 25 13:12:01 crc kubenswrapper[4675]: I1125 13:12:01.394011 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbhkw"]
Nov 25 13:12:01 crc kubenswrapper[4675]: I1125 13:12:01.444606 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 13:12:01 crc kubenswrapper[4675]: I1125 13:12:01.444839 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2p8qm" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="registry-server" containerID="cri-o://d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86" gracePeriod=2
Nov 25 13:12:01 crc kubenswrapper[4675]: I1125 13:12:01.928667 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.092541 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content\") pod \"0db468b2-8e51-4126-a30e-e8f562240e79\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") "
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.092660 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l88nv\" (UniqueName: \"kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv\") pod \"0db468b2-8e51-4126-a30e-e8f562240e79\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") "
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.092757 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities\") pod \"0db468b2-8e51-4126-a30e-e8f562240e79\" (UID: \"0db468b2-8e51-4126-a30e-e8f562240e79\") "
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.095168 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities" (OuterVolumeSpecName: "utilities") pod "0db468b2-8e51-4126-a30e-e8f562240e79" (UID: "0db468b2-8e51-4126-a30e-e8f562240e79"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.099387 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv" (OuterVolumeSpecName: "kube-api-access-l88nv") pod "0db468b2-8e51-4126-a30e-e8f562240e79" (UID: "0db468b2-8e51-4126-a30e-e8f562240e79"). InnerVolumeSpecName "kube-api-access-l88nv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.115219 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0db468b2-8e51-4126-a30e-e8f562240e79" (UID: "0db468b2-8e51-4126-a30e-e8f562240e79"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.195580 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.195623 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0db468b2-8e51-4126-a30e-e8f562240e79-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.195636 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l88nv\" (UniqueName: \"kubernetes.io/projected/0db468b2-8e51-4126-a30e-e8f562240e79-kube-api-access-l88nv\") on node \"crc\" DevicePath \"\""
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.215539 4675 generic.go:334] "Generic (PLEG): container finished" podID="0db468b2-8e51-4126-a30e-e8f562240e79" containerID="d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86" exitCode=0
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.215576 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerDied","Data":"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"}
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.215605 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p8qm"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.215623 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p8qm" event={"ID":"0db468b2-8e51-4126-a30e-e8f562240e79","Type":"ContainerDied","Data":"c5ef3dd6ea47861f3d0707b32d2a2cffcd67d4630b63a08dc9a6b343e383ebd7"}
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.215640 4675 scope.go:117] "RemoveContainer" containerID="d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.239198 4675 scope.go:117] "RemoveContainer" containerID="b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.262020 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.267677 4675 scope.go:117] "RemoveContainer" containerID="6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.276812 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p8qm"]
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.302586 4675 scope.go:117] "RemoveContainer" containerID="d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"
Nov 25 13:12:02 crc kubenswrapper[4675]: E1125 13:12:02.303015 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86\": container with ID starting with d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86 not found: ID does not exist" containerID="d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.303067 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86"} err="failed to get container status \"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86\": rpc error: code = NotFound desc = could not find container \"d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86\": container with ID starting with d9046303e175e4f82cc9a5a557d5ca6ecd1a98204c7e8e8e1c47bd6b91dace86 not found: ID does not exist"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.303099 4675 scope.go:117] "RemoveContainer" containerID="b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"
Nov 25 13:12:02 crc kubenswrapper[4675]: E1125 13:12:02.303525 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a\": container with ID starting with b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a not found: ID does not exist" containerID="b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.303579 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a"} err="failed to get container status \"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a\": rpc error: code = NotFound desc = could not find container \"b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a\": container with ID starting with b20ae3535872d8cf7156ca81882e1ac8baecf152d08587fb26596706c4d1c63a not found: ID does not exist"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.303610 4675 scope.go:117] "RemoveContainer" containerID="6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35"
Nov 25 13:12:02 crc kubenswrapper[4675]: E1125 13:12:02.304149 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35\": container with ID starting with 6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35 not found: ID does not exist" containerID="6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35"
Nov 25 13:12:02 crc kubenswrapper[4675]: I1125 13:12:02.304178 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35"} err="failed to get container status \"6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35\": rpc error: code = NotFound desc = could not find container \"6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35\": container with ID starting with 6f92f850e5b75a6680d6b23c2310b5e07a82d9a7f6fec2b0d8983ab51d0b4a35 not found: ID does not exist"
Nov 25 13:12:03 crc kubenswrapper[4675]: I1125 13:12:03.544630 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" path="/var/lib/kubelet/pods/0db468b2-8e51-4126-a30e-e8f562240e79/volumes"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.367554 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sr7ql"]
Nov 25 13:12:56 crc kubenswrapper[4675]: E1125 13:12:56.368502 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="extract-utilities"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.368515 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="extract-utilities"
Nov 25 13:12:56 crc kubenswrapper[4675]: E1125 13:12:56.368560 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="extract-content"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.368567 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="extract-content"
Nov 25 13:12:56 crc kubenswrapper[4675]: E1125 13:12:56.368581 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="registry-server"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.368587 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="registry-server"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.368840 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db468b2-8e51-4126-a30e-e8f562240e79" containerName="registry-server"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.370290 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.384081 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sr7ql"]
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.398784 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.398875 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrqzd\" (UniqueName: \"kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.398946 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.501532 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrqzd\" (UniqueName: \"kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.501760 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.501890 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.502225 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.502234 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.532720 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrqzd\" (UniqueName: \"kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd\") pod \"community-operators-sr7ql\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:56 crc kubenswrapper[4675]: I1125 13:12:56.698101 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:12:57 crc kubenswrapper[4675]: I1125 13:12:57.373951 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sr7ql"]
Nov 25 13:12:57 crc kubenswrapper[4675]: I1125 13:12:57.688860 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerID="23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab" exitCode=0
Nov 25 13:12:57 crc kubenswrapper[4675]: I1125 13:12:57.688929 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerDied","Data":"23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab"}
Nov 25 13:12:57 crc kubenswrapper[4675]: I1125 13:12:57.689154 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerStarted","Data":"44bfeba41f140b6ef446b66b1be04ec2191bd1ac514f7284f94652edc0594e47"}
Nov 25 13:12:58 crc kubenswrapper[4675]: I1125 13:12:58.700145 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerStarted","Data":"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624"}
Nov 25 13:12:59 crc kubenswrapper[4675]: I1125 13:12:59.717450 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerID="cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624" exitCode=0
Nov 25 13:12:59 crc kubenswrapper[4675]: I1125 13:12:59.717511 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerDied","Data":"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624"}
Nov 25 13:13:00 crc kubenswrapper[4675]: I1125 13:13:00.730711 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerStarted","Data":"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28"}
Nov 25 13:13:00 crc kubenswrapper[4675]: I1125 13:13:00.758276 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sr7ql" podStartSLOduration=2.308229025 podStartE2EDuration="4.758254322s" podCreationTimestamp="2025-11-25 13:12:56 +0000 UTC" firstStartedPulling="2025-11-25 13:12:57.692324077 +0000 UTC m=+2722.863916418" lastFinishedPulling="2025-11-25 13:13:00.142349374 +0000 UTC m=+2725.313941715" observedRunningTime="2025-11-25 13:13:00.749069851 +0000 UTC m=+2725.920662222" watchObservedRunningTime="2025-11-25 13:13:00.758254322 +0000 UTC m=+2725.929846673"
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.738139 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vgcgs"]
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.756767 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.758306 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vgcgs"]
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.955129 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-catalog-content\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.955217 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qncj9\" (UniqueName: \"kubernetes.io/projected/ff144f02-0869-43a8-9371-690790fac643-kube-api-access-qncj9\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:03 crc kubenswrapper[4675]: I1125 13:13:03.955293 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-utilities\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.057178 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-catalog-content\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.057599 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qncj9\" (UniqueName: \"kubernetes.io/projected/ff144f02-0869-43a8-9371-690790fac643-kube-api-access-qncj9\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.057755 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-utilities\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.058589 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-utilities\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.059435 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff144f02-0869-43a8-9371-690790fac643-catalog-content\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.089171 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qncj9\" (UniqueName: \"kubernetes.io/projected/ff144f02-0869-43a8-9371-690790fac643-kube-api-access-qncj9\") pod \"redhat-operators-vgcgs\" (UID: \"ff144f02-0869-43a8-9371-690790fac643\") " pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.384837 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vgcgs"
Nov 25 13:13:04 crc kubenswrapper[4675]: I1125 13:13:04.890049 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vgcgs"]
Nov 25 13:13:05 crc kubenswrapper[4675]: I1125 13:13:05.774452 4675 generic.go:334] "Generic (PLEG): container finished" podID="ff144f02-0869-43a8-9371-690790fac643" containerID="0de42c3fc589b1c75099207ce724a7fce2e70a89cb7f255c38a2205f17472828" exitCode=0
Nov 25 13:13:05 crc kubenswrapper[4675]: I1125 13:13:05.774571 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vgcgs" event={"ID":"ff144f02-0869-43a8-9371-690790fac643","Type":"ContainerDied","Data":"0de42c3fc589b1c75099207ce724a7fce2e70a89cb7f255c38a2205f17472828"}
Nov 25 13:13:05 crc kubenswrapper[4675]: I1125 13:13:05.774746 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vgcgs" event={"ID":"ff144f02-0869-43a8-9371-690790fac643","Type":"ContainerStarted","Data":"12d606228555da24eca49291caaff8503bee9b4304708768702ca60d71e6793b"}
Nov 25 13:13:06 crc kubenswrapper[4675]: I1125 13:13:06.698733 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:13:06 crc kubenswrapper[4675]: I1125 13:13:06.699098 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:13:06 crc kubenswrapper[4675]: I1125 13:13:06.751074 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:13:06 crc kubenswrapper[4675]: I1125 13:13:06.835966 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sr7ql"
Nov 25 13:13:08 crc kubenswrapper[4675]: I1125 13:13:08.924392 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sr7ql"]
Nov 25 13:13:08 crc kubenswrapper[4675]: I1125 13:13:08.924642 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sr7ql" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="registry-server" containerID="cri-o://0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28" gracePeriod=2
Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.391285 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sr7ql" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.571407 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content\") pod \"e5ffa9ba-1724-4874-aacd-05b843f91239\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.571456 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities\") pod \"e5ffa9ba-1724-4874-aacd-05b843f91239\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.571489 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrqzd\" (UniqueName: \"kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd\") pod \"e5ffa9ba-1724-4874-aacd-05b843f91239\" (UID: \"e5ffa9ba-1724-4874-aacd-05b843f91239\") " Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.573649 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities" (OuterVolumeSpecName: "utilities") pod "e5ffa9ba-1724-4874-aacd-05b843f91239" (UID: "e5ffa9ba-1724-4874-aacd-05b843f91239"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.580101 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd" (OuterVolumeSpecName: "kube-api-access-wrqzd") pod "e5ffa9ba-1724-4874-aacd-05b843f91239" (UID: "e5ffa9ba-1724-4874-aacd-05b843f91239"). InnerVolumeSpecName "kube-api-access-wrqzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.635873 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5ffa9ba-1724-4874-aacd-05b843f91239" (UID: "e5ffa9ba-1724-4874-aacd-05b843f91239"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.674055 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.674094 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ffa9ba-1724-4874-aacd-05b843f91239-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.674103 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrqzd\" (UniqueName: \"kubernetes.io/projected/e5ffa9ba-1724-4874-aacd-05b843f91239-kube-api-access-wrqzd\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.822799 4675 generic.go:334] "Generic (PLEG): container finished" podID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerID="0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28" exitCode=0 Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.822858 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerDied","Data":"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28"} Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.822888 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr7ql" event={"ID":"e5ffa9ba-1724-4874-aacd-05b843f91239","Type":"ContainerDied","Data":"44bfeba41f140b6ef446b66b1be04ec2191bd1ac514f7284f94652edc0594e47"} Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.823020 4675 scope.go:117] "RemoveContainer" containerID="0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.823300 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sr7ql" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.860579 4675 scope.go:117] "RemoveContainer" containerID="cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624" Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.863027 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sr7ql"] Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.871470 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sr7ql"] Nov 25 13:13:09 crc kubenswrapper[4675]: I1125 13:13:09.891656 4675 scope.go:117] "RemoveContainer" containerID="23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab" Nov 25 13:13:11 crc kubenswrapper[4675]: I1125 13:13:11.548474 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" path="/var/lib/kubelet/pods/e5ffa9ba-1724-4874-aacd-05b843f91239/volumes" Nov 25 13:13:13 crc kubenswrapper[4675]: I1125 13:13:13.662579 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:13:13 crc kubenswrapper[4675]: I1125 13:13:13.662850 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.971306 4675 scope.go:117] "RemoveContainer" containerID="0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28" Nov 25 13:13:15 crc kubenswrapper[4675]: E1125 13:13:15.972047 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28\": container with ID starting with 0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28 not found: ID does not exist" containerID="0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.972087 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28"} err="failed to get container status \"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28\": rpc error: code = NotFound desc = could not find container \"0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28\": container with ID starting with 0eb2044d42fc6f96448028fdfa8087bee809060dda3a51f5432c46a48c8aba28 not found: ID does not exist" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.972119 4675 scope.go:117] "RemoveContainer" containerID="cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624" Nov 25 13:13:15 crc kubenswrapper[4675]: E1125 13:13:15.972676 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624\": container with ID starting with cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624 
not found: ID does not exist" containerID="cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.972711 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624"} err="failed to get container status \"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624\": rpc error: code = NotFound desc = could not find container \"cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624\": container with ID starting with cbd7672b28e34d63436bc0d73e35f7ef34e76032fcf78c81a437a48f2e5d4624 not found: ID does not exist" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.972737 4675 scope.go:117] "RemoveContainer" containerID="23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab" Nov 25 13:13:15 crc kubenswrapper[4675]: E1125 13:13:15.973041 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab\": container with ID starting with 23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab not found: ID does not exist" containerID="23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab" Nov 25 13:13:15 crc kubenswrapper[4675]: I1125 13:13:15.973082 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab"} err="failed to get container status \"23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab\": rpc error: code = NotFound desc = could not find container \"23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab\": container with ID starting with 23da714361cb0a7fab9d3d6b28ae355846132fa6e2f687b4f3dd2f85ee14abab not found: ID does not exist" Nov 25 13:13:16 crc kubenswrapper[4675]: I1125 13:13:16.906684 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vgcgs" event={"ID":"ff144f02-0869-43a8-9371-690790fac643","Type":"ContainerStarted","Data":"b83d079cbd255605ae10a621ffec61a3dfc1b9d56fee87658aecee74249b5b00"} Nov 25 13:13:19 crc kubenswrapper[4675]: I1125 13:13:19.946651 4675 generic.go:334] "Generic (PLEG): container finished" podID="ff144f02-0869-43a8-9371-690790fac643" containerID="b83d079cbd255605ae10a621ffec61a3dfc1b9d56fee87658aecee74249b5b00" exitCode=0 Nov 25 13:13:19 crc kubenswrapper[4675]: I1125 13:13:19.946731 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vgcgs" event={"ID":"ff144f02-0869-43a8-9371-690790fac643","Type":"ContainerDied","Data":"b83d079cbd255605ae10a621ffec61a3dfc1b9d56fee87658aecee74249b5b00"} Nov 25 13:13:20 crc kubenswrapper[4675]: I1125 13:13:20.965538 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vgcgs" event={"ID":"ff144f02-0869-43a8-9371-690790fac643","Type":"ContainerStarted","Data":"318760c446d656131b8d2b550ff5f62401a4d698e34babaee4ebff996eba42c8"} Nov 25 13:13:21 crc kubenswrapper[4675]: I1125 13:13:21.011602 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vgcgs" podStartSLOduration=3.441596719 podStartE2EDuration="18.011584826s" podCreationTimestamp="2025-11-25 13:13:03 +0000 UTC" firstStartedPulling="2025-11-25 13:13:05.777589822 +0000 UTC m=+2730.949182163" 
lastFinishedPulling="2025-11-25 13:13:20.347577929 +0000 UTC m=+2745.519170270" observedRunningTime="2025-11-25 13:13:21.001160925 +0000 UTC m=+2746.172753266" watchObservedRunningTime="2025-11-25 13:13:21.011584826 +0000 UTC m=+2746.183177167" Nov 25 13:13:24 crc kubenswrapper[4675]: I1125 13:13:24.385111 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vgcgs" Nov 25 13:13:24 crc kubenswrapper[4675]: I1125 13:13:24.385793 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vgcgs" Nov 25 13:13:25 crc kubenswrapper[4675]: I1125 13:13:25.445370 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vgcgs" podUID="ff144f02-0869-43a8-9371-690790fac643" containerName="registry-server" probeResult="failure" output=< Nov 25 13:13:25 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:13:25 crc kubenswrapper[4675]: > Nov 25 13:13:34 crc kubenswrapper[4675]: I1125 13:13:34.437201 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vgcgs" Nov 25 13:13:34 crc kubenswrapper[4675]: I1125 13:13:34.496308 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vgcgs" Nov 25 13:13:34 crc kubenswrapper[4675]: I1125 13:13:34.754649 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vgcgs"] Nov 25 13:13:34 crc kubenswrapper[4675]: I1125 13:13:34.939854 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"] Nov 25 13:13:34 crc kubenswrapper[4675]: I1125 13:13:34.940064 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kxkld" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="registry-server" containerID="cri-o://daed327306aea14da9eb60498e026160f1ce069d1bdde8ab6f8c9e17441e510d" gracePeriod=2 Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.102105 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerID="daed327306aea14da9eb60498e026160f1ce069d1bdde8ab6f8c9e17441e510d" exitCode=0 Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.102168 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerDied","Data":"daed327306aea14da9eb60498e026160f1ce069d1bdde8ab6f8c9e17441e510d"} Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.443902 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kxkld" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.603793 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities\") pod \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.604073 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t6hz\" (UniqueName: \"kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz\") pod \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.604172 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities" (OuterVolumeSpecName: "utilities") pod "f4d43246-7b39-42d1-8b5d-7255662aaf2d" (UID: "f4d43246-7b39-42d1-8b5d-7255662aaf2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.604210 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content\") pod \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\" (UID: \"f4d43246-7b39-42d1-8b5d-7255662aaf2d\") " Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.604672 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.625592 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz" (OuterVolumeSpecName: "kube-api-access-8t6hz") pod "f4d43246-7b39-42d1-8b5d-7255662aaf2d" (UID: "f4d43246-7b39-42d1-8b5d-7255662aaf2d"). InnerVolumeSpecName "kube-api-access-8t6hz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.707115 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t6hz\" (UniqueName: \"kubernetes.io/projected/f4d43246-7b39-42d1-8b5d-7255662aaf2d-kube-api-access-8t6hz\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.757084 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4d43246-7b39-42d1-8b5d-7255662aaf2d" (UID: "f4d43246-7b39-42d1-8b5d-7255662aaf2d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:13:35 crc kubenswrapper[4675]: I1125 13:13:35.808620 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4d43246-7b39-42d1-8b5d-7255662aaf2d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.112636 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kxkld" event={"ID":"f4d43246-7b39-42d1-8b5d-7255662aaf2d","Type":"ContainerDied","Data":"61c27810b131b6393bcad274f7398ce9394d95c3c539672968e19b5795def561"} Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.112985 4675 scope.go:117] "RemoveContainer" containerID="daed327306aea14da9eb60498e026160f1ce069d1bdde8ab6f8c9e17441e510d" Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.112681 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kxkld" Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.147921 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"] Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.149325 4675 scope.go:117] "RemoveContainer" containerID="f87aef11c3eb1cd75c9554b55525e35164afadd2411c61657b3790cd36bf9b73" Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.163042 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kxkld"] Nov 25 13:13:36 crc kubenswrapper[4675]: I1125 13:13:36.198540 4675 scope.go:117] "RemoveContainer" containerID="ad9ab0324601b168fc9f9f7f9116784539cd8b12aa1839a9e7d6249f36c526f3" Nov 25 13:13:37 crc kubenswrapper[4675]: I1125 13:13:37.542428 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" path="/var/lib/kubelet/pods/f4d43246-7b39-42d1-8b5d-7255662aaf2d/volumes" Nov 25 13:13:43 crc kubenswrapper[4675]: I1125 13:13:43.662840 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:13:43 crc kubenswrapper[4675]: I1125 13:13:43.663405 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:14:13 crc kubenswrapper[4675]: I1125 13:14:13.662688 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:14:13 crc kubenswrapper[4675]: I1125 13:14:13.663355 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:14:13 crc kubenswrapper[4675]: I1125 13:14:13.663410 4675 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:14:13 crc kubenswrapper[4675]: I1125 13:14:13.664408 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:14:13 crc kubenswrapper[4675]: I1125 13:14:13.664541 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675" gracePeriod=600 Nov 25 13:14:14 crc kubenswrapper[4675]: I1125 13:14:14.464147 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675" exitCode=0 Nov 25 13:14:14 crc kubenswrapper[4675]: I1125 13:14:14.464856 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675"} Nov 25 13:14:14 crc kubenswrapper[4675]: I1125 13:14:14.464898 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba"} Nov 25 13:14:14 crc kubenswrapper[4675]: I1125 13:14:14.464917 4675 scope.go:117] "RemoveContainer" containerID="55f48c9928558f033b0e60fe9cf4de29afe1761b7730073945b0c4d6c8e75935" Nov 25 13:14:30 crc kubenswrapper[4675]: I1125 13:14:30.633394 4675 generic.go:334] "Generic (PLEG): container finished" podID="3ca0e52a-979d-4834-a03d-135355de72db" containerID="c7d72baad23c01a84111487a87dc5505a6029e2fe1e0eaa5e2862fc9031bff6e" exitCode=0 Nov 25 13:14:30 crc kubenswrapper[4675]: I1125 13:14:30.633487 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" event={"ID":"3ca0e52a-979d-4834-a03d-135355de72db","Type":"ContainerDied","Data":"c7d72baad23c01a84111487a87dc5505a6029e2fe1e0eaa5e2862fc9031bff6e"} Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.057176 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.085783 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj67c\" (UniqueName: \"kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.085851 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.086021 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.086043 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.086629 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.086930 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.086972 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2\") pod \"3ca0e52a-979d-4834-a03d-135355de72db\" (UID: \"3ca0e52a-979d-4834-a03d-135355de72db\") " Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.101552 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.111349 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c" (OuterVolumeSpecName: "kube-api-access-xj67c") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). 
InnerVolumeSpecName "kube-api-access-xj67c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.116624 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.119729 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.121134 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.133002 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.141152 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory" (OuterVolumeSpecName: "inventory") pod "3ca0e52a-979d-4834-a03d-135355de72db" (UID: "3ca0e52a-979d-4834-a03d-135355de72db"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.195305 4675 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.195419 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj67c\" (UniqueName: \"kubernetes.io/projected/3ca0e52a-979d-4834-a03d-135355de72db-kube-api-access-xj67c\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.195500 4675 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.195734 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.195924 4675 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.196011 4675 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.196101 4675 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/3ca0e52a-979d-4834-a03d-135355de72db-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.654662 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" event={"ID":"3ca0e52a-979d-4834-a03d-135355de72db","Type":"ContainerDied","Data":"c0d861b379cc2bd7fd0801c3c95bd6af548b3b19617bd8be1488de846ee3683b"} Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.654915 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0d861b379cc2bd7fd0801c3c95bd6af548b3b19617bd8be1488de846ee3683b" Nov 25 13:14:32 crc kubenswrapper[4675]: I1125 13:14:32.654964 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj" Nov 25 13:14:32 crc kubenswrapper[4675]: E1125 13:14:32.857913 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ca0e52a_979d_4834_a03d_135355de72db.slice/crio-c0d861b379cc2bd7fd0801c3c95bd6af548b3b19617bd8be1488de846ee3683b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ca0e52a_979d_4834_a03d_135355de72db.slice\": RecentStats: unable to find data in memory cache]" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.148623 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g"] Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.150551 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="extract-utilities" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.150649 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="extract-utilities" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.150748 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="extract-utilities" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.150855 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="extract-utilities" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.150944 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="extract-content" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151004 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="extract-content" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.151074 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ca0e52a-979d-4834-a03d-135355de72db" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151131 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ca0e52a-979d-4834-a03d-135355de72db" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.151196 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151255 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.151320 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151382 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: E1125 13:15:00.151446 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="extract-content" Nov 25 13:15:00 crc kubenswrapper[4675]: 
I1125 13:15:00.151503 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="extract-content" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151770 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4d43246-7b39-42d1-8b5d-7255662aaf2d" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.151903 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ca0e52a-979d-4834-a03d-135355de72db" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.152024 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ffa9ba-1724-4874-aacd-05b843f91239" containerName="registry-server" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.152886 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.159224 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.160462 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g"] Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.165771 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.320632 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgh8f\" (UniqueName: \"kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.321028 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.321151 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.422915 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgh8f\" (UniqueName: \"kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.423044 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.423106 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.424201 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.434807 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.442756 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgh8f\" (UniqueName: \"kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f\") pod \"collect-profiles-29401275-nlk6g\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.492173 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:00 crc kubenswrapper[4675]: I1125 13:15:00.991483 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g"] Nov 25 13:15:01 crc kubenswrapper[4675]: I1125 13:15:01.897605 4675 generic.go:334] "Generic (PLEG): container finished" podID="dd9a055e-f02a-48fd-97ef-848390d5a2b0" containerID="9212224a985797689ea101f94a6881a544b33ae08eebbce43b5c3c0055136fea" exitCode=0 Nov 25 13:15:01 crc kubenswrapper[4675]: I1125 13:15:01.897656 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" event={"ID":"dd9a055e-f02a-48fd-97ef-848390d5a2b0","Type":"ContainerDied","Data":"9212224a985797689ea101f94a6881a544b33ae08eebbce43b5c3c0055136fea"} Nov 25 13:15:01 crc kubenswrapper[4675]: I1125 13:15:01.899180 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" event={"ID":"dd9a055e-f02a-48fd-97ef-848390d5a2b0","Type":"ContainerStarted","Data":"09d2a07354d3660d89654a92a9b75297d16b289026214a9fc25d74e9b40cb179"} Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.249654 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.390200 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgh8f\" (UniqueName: \"kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f\") pod \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.390399 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume\") pod \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.390483 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume\") pod \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\" (UID: \"dd9a055e-f02a-48fd-97ef-848390d5a2b0\") " Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.391298 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume" (OuterVolumeSpecName: "config-volume") pod "dd9a055e-f02a-48fd-97ef-848390d5a2b0" (UID: "dd9a055e-f02a-48fd-97ef-848390d5a2b0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.396497 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f" (OuterVolumeSpecName: "kube-api-access-xgh8f") pod "dd9a055e-f02a-48fd-97ef-848390d5a2b0" (UID: "dd9a055e-f02a-48fd-97ef-848390d5a2b0"). InnerVolumeSpecName "kube-api-access-xgh8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.402890 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dd9a055e-f02a-48fd-97ef-848390d5a2b0" (UID: "dd9a055e-f02a-48fd-97ef-848390d5a2b0"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.492230 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgh8f\" (UniqueName: \"kubernetes.io/projected/dd9a055e-f02a-48fd-97ef-848390d5a2b0-kube-api-access-xgh8f\") on node \"crc\" DevicePath \"\"" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.492485 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd9a055e-f02a-48fd-97ef-848390d5a2b0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.492544 4675 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd9a055e-f02a-48fd-97ef-848390d5a2b0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 13:15:03 crc kubenswrapper[4675]: E1125 13:15:03.624349 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd9a055e_f02a_48fd_97ef_848390d5a2b0.slice/crio-09d2a07354d3660d89654a92a9b75297d16b289026214a9fc25d74e9b40cb179\": RecentStats: unable to find data in memory cache]" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.916053 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" event={"ID":"dd9a055e-f02a-48fd-97ef-848390d5a2b0","Type":"ContainerDied","Data":"09d2a07354d3660d89654a92a9b75297d16b289026214a9fc25d74e9b40cb179"} Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.916102 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09d2a07354d3660d89654a92a9b75297d16b289026214a9fc25d74e9b40cb179" Nov 25 13:15:03 crc kubenswrapper[4675]: I1125 13:15:03.916152 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401275-nlk6g" Nov 25 13:15:04 crc kubenswrapper[4675]: I1125 13:15:04.322721 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"] Nov 25 13:15:04 crc kubenswrapper[4675]: I1125 13:15:04.331070 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401230-8s8kg"] Nov 25 13:15:05 crc kubenswrapper[4675]: I1125 13:15:05.544682 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c198829-c4dc-459f-b29a-e705390ef9eb" path="/var/lib/kubelet/pods/2c198829-c4dc-459f-b29a-e705390ef9eb/volumes" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.943749 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 13:15:29 crc kubenswrapper[4675]: E1125 13:15:29.944706 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd9a055e-f02a-48fd-97ef-848390d5a2b0" containerName="collect-profiles" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.944719 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd9a055e-f02a-48fd-97ef-848390d5a2b0" containerName="collect-profiles" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.944916 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd9a055e-f02a-48fd-97ef-848390d5a2b0" containerName="collect-profiles" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.945536 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.949431 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.949428 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.949431 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.949711 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-pfpzw" Nov 25 13:15:29 crc kubenswrapper[4675]: I1125 13:15:29.956927 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.074607 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.074689 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.074754 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.074850 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89fgr\" (UniqueName: \"kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.074879 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.076558 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.076731 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.077431 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.077696 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180193 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180275 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180308 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-89fgr\" (UniqueName: \"kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180335 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180401 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180423 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180471 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180508 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180577 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180762 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.180805 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.181384 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: 
\"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.182260 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.189749 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.190622 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.191933 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.192423 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.199514 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89fgr\" (UniqueName: \"kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.222095 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.275959 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.738642 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 13:15:30 crc kubenswrapper[4675]: I1125 13:15:30.746006 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 13:15:31 crc kubenswrapper[4675]: I1125 13:15:31.156713 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2beee9f5-8487-4f64-a55c-11f32c68c5fc","Type":"ContainerStarted","Data":"53bb6966f44b9b294ac4adfa805bd6728cefe0a8830e9c1589573d39761273d6"} Nov 25 13:15:47 crc kubenswrapper[4675]: I1125 13:15:47.400049 4675 scope.go:117] "RemoveContainer" containerID="0ef735e5c248dfbecefa8320d20ae1aaa94227042eecb2ed966a55f462e1b135" Nov 25 13:16:06 crc kubenswrapper[4675]: E1125 13:16:06.841744 4675 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 13:16:06 crc kubenswrapper[4675]: E1125 13:16:06.843072 4675 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89fgr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeE
Nov 25 13:16:06 crc kubenswrapper[4675]: E1125 13:16:06.844305 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="2beee9f5-8487-4f64-a55c-11f32c68c5fc"
Nov 25 13:16:07 crc kubenswrapper[4675]: E1125 13:16:07.538258 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="2beee9f5-8487-4f64-a55c-11f32c68c5fc"
Nov 25 13:16:13 crc kubenswrapper[4675]: I1125 13:16:13.662193 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:16:13 crc kubenswrapper[4675]: I1125 13:16:13.662703 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:16:20 crc kubenswrapper[4675]: I1125 13:16:20.013751 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 25 13:16:21 crc kubenswrapper[4675]: I1125 13:16:21.661105 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2beee9f5-8487-4f64-a55c-11f32c68c5fc","Type":"ContainerStarted","Data":"17619b5908d1220877c57d893f49f01c2885f2d929d98002d125bbacf5a2d79e"}
Nov 25 13:16:21 crc kubenswrapper[4675]: I1125 13:16:21.681852 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.416347316 podStartE2EDuration="53.681833632s" podCreationTimestamp="2025-11-25 13:15:28 +0000 UTC" firstStartedPulling="2025-11-25 13:15:30.745809255 +0000 UTC m=+2875.917401596" lastFinishedPulling="2025-11-25 13:16:20.011295571 +0000 UTC m=+2925.182887912" observedRunningTime="2025-11-25 13:16:21.680282034 +0000 UTC m=+2926.851874375" watchObservedRunningTime="2025-11-25 13:16:21.681833632 +0000 UTC m=+2926.853425973"
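The startup-latency record above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (13:16:21.681833632 - 13:15:28 = 53.681833632s), and podStartSLOduration matches that figure minus the image-pull window bounded by firstStartedPulling and lastFinishedPulling (53.681833632s - 49.265486316s = 4.416347316s). That the SLO metric excludes pull time is inferred here from the numbers, not read out of the kubelet source; a check of the arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-25 13:15:28 +0000 UTC")             // podCreationTimestamp
	firstPull := parse("2025-11-25 13:15:30.745809255 +0000 UTC") // firstStartedPulling
	lastPull := parse("2025-11-25 13:16:20.011295571 +0000 UTC")  // lastFinishedPulling
	running := parse("2025-11-25 13:16:21.681833632 +0000 UTC")   // watchObservedRunningTime

	e2e := running.Sub(created)      // 53.681833632s, the logged podStartE2EDuration
	pull := lastPull.Sub(firstPull)  // 49.265486316s spent pulling the image
	fmt.Println(e2e, pull, e2e-pull) // e2e-pull = 4.416347316s, the logged podStartSLOduration
}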
Nov 25 13:16:43 crc kubenswrapper[4675]: I1125 13:16:43.662068 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:16:43 crc kubenswrapper[4675]: I1125 13:16:43.662598 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:17:13 crc kubenswrapper[4675]: I1125 13:17:13.662932 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:17:13 crc kubenswrapper[4675]: I1125 13:17:13.663450 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:17:13 crc kubenswrapper[4675]: I1125 13:17:13.663496 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r"
Nov 25 13:17:13 crc kubenswrapper[4675]: I1125 13:17:13.664257 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 13:17:13 crc kubenswrapper[4675]: I1125 13:17:13.664316 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" gracePeriod=600
Nov 25 13:17:13 crc kubenswrapper[4675]: E1125 13:17:13.820515 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:17:14 crc kubenswrapper[4675]: I1125 13:17:14.128123 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" exitCode=0
Nov 25 13:17:14 crc kubenswrapper[4675]: I1125 13:17:14.128168 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba"}
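The 13:17:13 run above is the complete liveness-restart sequence: the prober's HTTP GET to 127.0.0.1:8798/health is refused, the sync loop marks the container unhealthy, and the kubelet kills it with gracePeriod=600 (presumably the pod's terminationGracePeriodSeconds) before letting it restart. A sketch of the check the prober is making; the kubelet treats any HTTP status in [200,400) as success, and the 1s timeout below is the kubelet's default timeoutSeconds, an assumption rather than a value visible in this log:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce mimics the shape of the kubelet's HTTP liveness check against
// the machine-config-daemon health endpoint. A connection refusal, as
// logged above, surfaces as a transport error and counts as a failure.
func probeOnce(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("unhealthy status %d", resp.StatusCode)
}

func main() {
	fmt.Println(probeOnce("http://127.0.0.1:8798/health"))
}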
event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba"} Nov 25 13:17:14 crc kubenswrapper[4675]: I1125 13:17:14.128203 4675 scope.go:117] "RemoveContainer" containerID="c850206e7dacec0adcea0a87716c476086670e247da58f26dc8aa843626aa675" Nov 25 13:17:14 crc kubenswrapper[4675]: I1125 13:17:14.128697 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:17:14 crc kubenswrapper[4675]: E1125 13:17:14.129044 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:17:27 crc kubenswrapper[4675]: I1125 13:17:27.532370 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:17:27 crc kubenswrapper[4675]: E1125 13:17:27.533302 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:17:42 crc kubenswrapper[4675]: I1125 13:17:42.533284 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:17:42 crc kubenswrapper[4675]: E1125 13:17:42.534030 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:17:53 crc kubenswrapper[4675]: I1125 13:17:53.532666 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:17:53 crc kubenswrapper[4675]: E1125 13:17:53.533475 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:18:04 crc kubenswrapper[4675]: I1125 13:18:04.532049 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:18:04 crc kubenswrapper[4675]: E1125 13:18:04.534474 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:18:18 crc kubenswrapper[4675]: I1125 13:18:18.533321 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:18:18 crc kubenswrapper[4675]: E1125 13:18:18.534044 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:18:32 crc kubenswrapper[4675]: I1125 13:18:32.532691 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:18:32 crc kubenswrapper[4675]: E1125 13:18:32.533430 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:18:46 crc kubenswrapper[4675]: I1125 13:18:46.532365 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:18:46 crc kubenswrapper[4675]: E1125 13:18:46.533262 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:18:59 crc kubenswrapper[4675]: I1125 13:18:59.533601 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:18:59 crc kubenswrapper[4675]: E1125 13:18:59.536476 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:19:13 crc kubenswrapper[4675]: I1125 13:19:13.532354 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:19:13 crc kubenswrapper[4675]: E1125 13:19:13.533168 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" 
podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:19:24 crc kubenswrapper[4675]: I1125 13:19:24.533132 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:19:24 crc kubenswrapper[4675]: E1125 13:19:24.533967 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:19:37 crc kubenswrapper[4675]: I1125 13:19:37.536957 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:19:37 crc kubenswrapper[4675]: E1125 13:19:37.537762 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:19:48 crc kubenswrapper[4675]: I1125 13:19:48.532860 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:19:48 crc kubenswrapper[4675]: E1125 13:19:48.533856 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:19:59 crc kubenswrapper[4675]: I1125 13:19:59.532971 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:19:59 crc kubenswrapper[4675]: E1125 13:19:59.533779 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:20:10 crc kubenswrapper[4675]: I1125 13:20:10.532338 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:20:10 crc kubenswrapper[4675]: E1125 13:20:10.533144 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:20:24 crc kubenswrapper[4675]: I1125 13:20:24.534163 4675 scope.go:117] "RemoveContainer" 
containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:20:24 crc kubenswrapper[4675]: E1125 13:20:24.535075 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:20:38 crc kubenswrapper[4675]: I1125 13:20:38.532863 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:20:38 crc kubenswrapper[4675]: E1125 13:20:38.533586 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:20:51 crc kubenswrapper[4675]: I1125 13:20:51.533146 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:20:51 crc kubenswrapper[4675]: E1125 13:20:51.533884 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:21:03 crc kubenswrapper[4675]: I1125 13:21:03.537063 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:21:03 crc kubenswrapper[4675]: E1125 13:21:03.537908 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:21:14 crc kubenswrapper[4675]: I1125 13:21:14.532077 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:21:14 crc kubenswrapper[4675]: E1125 13:21:14.532994 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:21:25 crc kubenswrapper[4675]: I1125 13:21:25.532781 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:21:25 crc kubenswrapper[4675]: E1125 13:21:25.533620 4675 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:21:40 crc kubenswrapper[4675]: I1125 13:21:40.532852 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:21:40 crc kubenswrapper[4675]: E1125 13:21:40.533746 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:21:55 crc kubenswrapper[4675]: I1125 13:21:55.541556 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:21:55 crc kubenswrapper[4675]: E1125 13:21:55.542433 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.038713 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"] Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.041019 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.056719 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"] Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.238198 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.238264 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bv7g\" (UniqueName: \"kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.238328 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.339793 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.340297 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.340342 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bv7g\" (UniqueName: \"kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.340376 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.340874 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.378682 4675 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5bv7g\" (UniqueName: \"kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g\") pod \"certified-operators-f9sgx\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") " pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:00 crc kubenswrapper[4675]: I1125 13:22:00.663010 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:01 crc kubenswrapper[4675]: I1125 13:22:01.499849 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"] Nov 25 13:22:01 crc kubenswrapper[4675]: I1125 13:22:01.660215 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerStarted","Data":"0ac4ae62ad9e56efb7976be33c2f31fbb8f9c32ee4f1bcf52e63d03bafa0f7cd"} Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.440586 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.442927 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.458945 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.584091 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.584198 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ks62\" (UniqueName: \"kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.584278 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.670787 4675 generic.go:334] "Generic (PLEG): container finished" podID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerID="ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82" exitCode=0 Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.670861 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerDied","Data":"ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82"} Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.672959 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 13:22:02 crc kubenswrapper[4675]: 
I1125 13:22:02.686400 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.686667 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.686740 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ks62\" (UniqueName: \"kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.688359 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.688499 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.714650 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ks62\" (UniqueName: \"kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62\") pod \"redhat-marketplace-8hz6j\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:02 crc kubenswrapper[4675]: I1125 13:22:02.760653 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:03 crc kubenswrapper[4675]: I1125 13:22:03.242716 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:03 crc kubenswrapper[4675]: W1125 13:22:03.249489 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35a7202a_5f2a_42a7_af32_962de311245c.slice/crio-55337798ef9a84010cd86b6026b62050f3a39dfb632375d17c269d562532f106 WatchSource:0}: Error finding container 55337798ef9a84010cd86b6026b62050f3a39dfb632375d17c269d562532f106: Status 404 returned error can't find the container with id 55337798ef9a84010cd86b6026b62050f3a39dfb632375d17c269d562532f106 Nov 25 13:22:03 crc kubenswrapper[4675]: I1125 13:22:03.681508 4675 generic.go:334] "Generic (PLEG): container finished" podID="35a7202a-5f2a-42a7-af32-962de311245c" containerID="fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef" exitCode=0 Nov 25 13:22:03 crc kubenswrapper[4675]: I1125 13:22:03.681729 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerDied","Data":"fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef"} Nov 25 13:22:03 crc kubenswrapper[4675]: I1125 13:22:03.681773 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerStarted","Data":"55337798ef9a84010cd86b6026b62050f3a39dfb632375d17c269d562532f106"} Nov 25 13:22:04 crc kubenswrapper[4675]: I1125 13:22:04.690872 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerStarted","Data":"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e"} Nov 25 13:22:04 crc kubenswrapper[4675]: I1125 13:22:04.693554 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerStarted","Data":"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"} Nov 25 13:22:05 crc kubenswrapper[4675]: I1125 13:22:05.703422 4675 generic.go:334] "Generic (PLEG): container finished" podID="35a7202a-5f2a-42a7-af32-962de311245c" containerID="e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e" exitCode=0 Nov 25 13:22:05 crc kubenswrapper[4675]: I1125 13:22:05.703508 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerDied","Data":"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e"} Nov 25 13:22:05 crc kubenswrapper[4675]: I1125 13:22:05.706581 4675 generic.go:334] "Generic (PLEG): container finished" podID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerID="846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092" exitCode=0 Nov 25 13:22:05 crc kubenswrapper[4675]: I1125 13:22:05.706635 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerDied","Data":"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"} Nov 25 13:22:06 crc kubenswrapper[4675]: I1125 13:22:06.717296 
4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerStarted","Data":"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72"} Nov 25 13:22:06 crc kubenswrapper[4675]: I1125 13:22:06.721957 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerStarted","Data":"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"} Nov 25 13:22:06 crc kubenswrapper[4675]: I1125 13:22:06.739008 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8hz6j" podStartSLOduration=2.308236012 podStartE2EDuration="4.738991075s" podCreationTimestamp="2025-11-25 13:22:02 +0000 UTC" firstStartedPulling="2025-11-25 13:22:03.684743465 +0000 UTC m=+3268.856335806" lastFinishedPulling="2025-11-25 13:22:06.115498528 +0000 UTC m=+3271.287090869" observedRunningTime="2025-11-25 13:22:06.736244029 +0000 UTC m=+3271.907836380" watchObservedRunningTime="2025-11-25 13:22:06.738991075 +0000 UTC m=+3271.910583416" Nov 25 13:22:06 crc kubenswrapper[4675]: I1125 13:22:06.761694 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f9sgx" podStartSLOduration=3.276966225 podStartE2EDuration="6.761672928s" podCreationTimestamp="2025-11-25 13:22:00 +0000 UTC" firstStartedPulling="2025-11-25 13:22:02.672740653 +0000 UTC m=+3267.844332994" lastFinishedPulling="2025-11-25 13:22:06.157447356 +0000 UTC m=+3271.329039697" observedRunningTime="2025-11-25 13:22:06.759068866 +0000 UTC m=+3271.930661227" watchObservedRunningTime="2025-11-25 13:22:06.761672928 +0000 UTC m=+3271.933265269" Nov 25 13:22:09 crc kubenswrapper[4675]: I1125 13:22:09.532944 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:22:09 crc kubenswrapper[4675]: E1125 13:22:09.533471 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:22:10 crc kubenswrapper[4675]: I1125 13:22:10.663398 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:10 crc kubenswrapper[4675]: I1125 13:22:10.663451 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:10 crc kubenswrapper[4675]: I1125 13:22:10.717116 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:12 crc kubenswrapper[4675]: I1125 13:22:12.761674 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:12 crc kubenswrapper[4675]: I1125 13:22:12.762022 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:12 crc kubenswrapper[4675]: I1125 13:22:12.824759 4675 
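The probe lines above show the ordering the kubelet enforces for the two catalog pods: while the startup probe is failing, readiness is reported as status="" (not yet evaluated); startup then flips to "started", and only afterwards can readiness reach "ready". A hedged sketch of container probe wiring that yields that sequence; the probe handler, command, and thresholds are invented for illustration, since the catalog pods' real probe definitions are not part of this log:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name: "registry-server",
		// Until the startup probe succeeds once, liveness and readiness
		// are not evaluated, which is why readiness shows status=""
		// while startup is still unhealthy.
		StartupProbe: &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				Exec: &corev1.ExecAction{Command: []string{"grpc_health_probe", "-addr=:50051"}}, // illustrative only
			},
			PeriodSeconds:    10,
			FailureThreshold: 15,
		},
		ReadinessProbe: &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				Exec: &corev1.ExecAction{Command: []string{"grpc_health_probe", "-addr=:50051"}}, // illustrative only
			},
			PeriodSeconds: 10,
		},
	}
	fmt.Printf("%+v\n", c)
}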
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:12 crc kubenswrapper[4675]: I1125 13:22:12.878528 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.225385 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.225950 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8hz6j" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="registry-server" containerID="cri-o://b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72" gracePeriod=2 Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.716067 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.819845 4675 generic.go:334] "Generic (PLEG): container finished" podID="35a7202a-5f2a-42a7-af32-962de311245c" containerID="b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72" exitCode=0 Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.819885 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerDied","Data":"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72"} Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.819910 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hz6j" event={"ID":"35a7202a-5f2a-42a7-af32-962de311245c","Type":"ContainerDied","Data":"55337798ef9a84010cd86b6026b62050f3a39dfb632375d17c269d562532f106"} Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.819926 4675 scope.go:117] "RemoveContainer" containerID="b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.820063 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hz6j" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.844421 4675 scope.go:117] "RemoveContainer" containerID="e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.851317 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities\") pod \"35a7202a-5f2a-42a7-af32-962de311245c\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.851388 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content\") pod \"35a7202a-5f2a-42a7-af32-962de311245c\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.851429 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ks62\" (UniqueName: \"kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62\") pod \"35a7202a-5f2a-42a7-af32-962de311245c\" (UID: \"35a7202a-5f2a-42a7-af32-962de311245c\") " Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.860291 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities" (OuterVolumeSpecName: "utilities") pod "35a7202a-5f2a-42a7-af32-962de311245c" (UID: "35a7202a-5f2a-42a7-af32-962de311245c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.867452 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62" (OuterVolumeSpecName: "kube-api-access-5ks62") pod "35a7202a-5f2a-42a7-af32-962de311245c" (UID: "35a7202a-5f2a-42a7-af32-962de311245c"). InnerVolumeSpecName "kube-api-access-5ks62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.869345 4675 scope.go:117] "RemoveContainer" containerID="fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.880403 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35a7202a-5f2a-42a7-af32-962de311245c" (UID: "35a7202a-5f2a-42a7-af32-962de311245c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.948899 4675 scope.go:117] "RemoveContainer" containerID="b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72" Nov 25 13:22:16 crc kubenswrapper[4675]: E1125 13:22:16.949361 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72\": container with ID starting with b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72 not found: ID does not exist" containerID="b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.949423 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72"} err="failed to get container status \"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72\": rpc error: code = NotFound desc = could not find container \"b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72\": container with ID starting with b791dd1f0634d9e393f4f7c314e0399eeb4f0f460f70080adc8c937981417a72 not found: ID does not exist" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.949466 4675 scope.go:117] "RemoveContainer" containerID="e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e" Nov 25 13:22:16 crc kubenswrapper[4675]: E1125 13:22:16.949712 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e\": container with ID starting with e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e not found: ID does not exist" containerID="e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.949758 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e"} err="failed to get container status \"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e\": rpc error: code = NotFound desc = could not find container \"e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e\": container with ID starting with e3d1fd7c87665c165cbc820f77044b5a4426e2ee70fdcbb64b40d188db5e559e not found: ID does not exist" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.949778 4675 scope.go:117] "RemoveContainer" containerID="fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef" Nov 25 13:22:16 crc kubenswrapper[4675]: E1125 13:22:16.950028 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef\": container with ID starting with fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef not found: ID does not exist" containerID="fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.950050 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef"} err="failed to get container status \"fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef\": rpc error: code = NotFound desc = could not 
find container \"fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef\": container with ID starting with fe085657a1cf2ba0cd1ec4ec10435fe2f46566dc2f2d83d954b17dcf293124ef not found: ID does not exist" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.960795 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.960843 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35a7202a-5f2a-42a7-af32-962de311245c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:16 crc kubenswrapper[4675]: I1125 13:22:16.960857 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ks62\" (UniqueName: \"kubernetes.io/projected/35a7202a-5f2a-42a7-af32-962de311245c-kube-api-access-5ks62\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:17 crc kubenswrapper[4675]: I1125 13:22:17.153859 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:17 crc kubenswrapper[4675]: I1125 13:22:17.169515 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hz6j"] Nov 25 13:22:17 crc kubenswrapper[4675]: I1125 13:22:17.544246 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35a7202a-5f2a-42a7-af32-962de311245c" path="/var/lib/kubelet/pods/35a7202a-5f2a-42a7-af32-962de311245c/volumes" Nov 25 13:22:20 crc kubenswrapper[4675]: I1125 13:22:20.704832 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f9sgx" Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.224644 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"] Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.224939 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f9sgx" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="registry-server" containerID="cri-o://ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9" gracePeriod=2 Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.704798 4675 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.704798 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f9sgx"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.855128 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content\") pod \"2c280461-76a3-4bb9-9fa6-59fc742e8596\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") "
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.855251 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bv7g\" (UniqueName: \"kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g\") pod \"2c280461-76a3-4bb9-9fa6-59fc742e8596\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") "
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.855282 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities\") pod \"2c280461-76a3-4bb9-9fa6-59fc742e8596\" (UID: \"2c280461-76a3-4bb9-9fa6-59fc742e8596\") "
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.856248 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities" (OuterVolumeSpecName: "utilities") pod "2c280461-76a3-4bb9-9fa6-59fc742e8596" (UID: "2c280461-76a3-4bb9-9fa6-59fc742e8596"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.870865 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g" (OuterVolumeSpecName: "kube-api-access-5bv7g") pod "2c280461-76a3-4bb9-9fa6-59fc742e8596" (UID: "2c280461-76a3-4bb9-9fa6-59fc742e8596"). InnerVolumeSpecName "kube-api-access-5bv7g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.882380 4675 generic.go:334] "Generic (PLEG): container finished" podID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerID="ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9" exitCode=0
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.882418 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerDied","Data":"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"}
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.882443 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f9sgx" event={"ID":"2c280461-76a3-4bb9-9fa6-59fc742e8596","Type":"ContainerDied","Data":"0ac4ae62ad9e56efb7976be33c2f31fbb8f9c32ee4f1bcf52e63d03bafa0f7cd"}
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.882457 4675 scope.go:117] "RemoveContainer" containerID="ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.882558 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f9sgx"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.907258 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c280461-76a3-4bb9-9fa6-59fc742e8596" (UID: "2c280461-76a3-4bb9-9fa6-59fc742e8596"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.920116 4675 scope.go:117] "RemoveContainer" containerID="846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.950355 4675 scope.go:117] "RemoveContainer" containerID="ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.958052 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.958102 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bv7g\" (UniqueName: \"kubernetes.io/projected/2c280461-76a3-4bb9-9fa6-59fc742e8596-kube-api-access-5bv7g\") on node \"crc\" DevicePath \"\""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.958136 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c280461-76a3-4bb9-9fa6-59fc742e8596-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.986016 4675 scope.go:117] "RemoveContainer" containerID="ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"
Nov 25 13:22:21 crc kubenswrapper[4675]: E1125 13:22:21.986479 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9\": container with ID starting with ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9 not found: ID does not exist" containerID="ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.986517 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9"} err="failed to get container status \"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9\": rpc error: code = NotFound desc = could not find container \"ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9\": container with ID starting with ed305a0259761cdcc965ba7e41b59b9248502cc9620c6eea5d2240ab61dd78d9 not found: ID does not exist"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.986542 4675 scope.go:117] "RemoveContainer" containerID="846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"
Nov 25 13:22:21 crc kubenswrapper[4675]: E1125 13:22:21.998974 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092\": container with ID starting with 846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092 not found: ID does not exist" containerID="846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.999025 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092"} err="failed to get container status \"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092\": rpc error: code = NotFound desc = could not find container \"846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092\": container with ID starting with 846184e9137e78c657ab842f52d2bf0f1bd13515b35ef4f0901d3ac49642f092 not found: ID does not exist"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.999052 4675 scope.go:117] "RemoveContainer" containerID="ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82"
Nov 25 13:22:21 crc kubenswrapper[4675]: E1125 13:22:21.999639 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82\": container with ID starting with ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82 not found: ID does not exist" containerID="ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82"
Nov 25 13:22:21 crc kubenswrapper[4675]: I1125 13:22:21.999772 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82"} err="failed to get container status \"ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82\": rpc error: code = NotFound desc = could not find container \"ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82\": container with ID starting with ad109c8254aa9514df70977341d905af9ce06e93e2e9b7125dbb116a01288a82 not found: ID does not exist"
Nov 25 13:22:22 crc kubenswrapper[4675]: I1125 13:22:22.213673 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"]
Nov 25 13:22:22 crc kubenswrapper[4675]: I1125 13:22:22.223456 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f9sgx"]
Nov 25 13:22:23 crc kubenswrapper[4675]: I1125 13:22:23.533166 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba"
Nov 25 13:22:23 crc kubenswrapper[4675]: I1125 13:22:23.545298 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" path="/var/lib/kubelet/pods/2c280461-76a3-4bb9-9fa6-59fc742e8596/volumes"
Nov 25 13:22:23 crc kubenswrapper[4675]: I1125 13:22:23.901326 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380"}
Nov 25 13:22:29 crc kubenswrapper[4675]: I1125 13:22:29.968222 4675 generic.go:334] "Generic (PLEG): container finished" podID="2beee9f5-8487-4f64-a55c-11f32c68c5fc" containerID="17619b5908d1220877c57d893f49f01c2885f2d929d98002d125bbacf5a2d79e" exitCode=0
event={"ID":"2beee9f5-8487-4f64-a55c-11f32c68c5fc","Type":"ContainerDied","Data":"17619b5908d1220877c57d893f49f01c2885f2d929d98002d125bbacf5a2d79e"} Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.335755 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435083 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89fgr\" (UniqueName: \"kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435137 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435221 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435258 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435418 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435469 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435509 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435555 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.435617 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config\") pod \"2beee9f5-8487-4f64-a55c-11f32c68c5fc\" (UID: 
\"2beee9f5-8487-4f64-a55c-11f32c68c5fc\") " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.437447 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.437897 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data" (OuterVolumeSpecName: "config-data") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.441804 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.442235 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "test-operator-logs") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.443343 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr" (OuterVolumeSpecName: "kube-api-access-89fgr") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "kube-api-access-89fgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.464126 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.469490 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.471638 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). 
InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.499608 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "2beee9f5-8487-4f64-a55c-11f32c68c5fc" (UID: "2beee9f5-8487-4f64-a55c-11f32c68c5fc"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537898 4675 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537929 4675 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537941 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537951 4675 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2beee9f5-8487-4f64-a55c-11f32c68c5fc-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537960 4675 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537968 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89fgr\" (UniqueName: \"kubernetes.io/projected/2beee9f5-8487-4f64-a55c-11f32c68c5fc-kube-api-access-89fgr\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.537992 4675 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.538003 4675 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2beee9f5-8487-4f64-a55c-11f32c68c5fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.538012 4675 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2beee9f5-8487-4f64-a55c-11f32c68c5fc-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.557718 4675 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.639770 4675 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.986870 4675 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2beee9f5-8487-4f64-a55c-11f32c68c5fc","Type":"ContainerDied","Data":"53bb6966f44b9b294ac4adfa805bd6728cefe0a8830e9c1589573d39761273d6"} Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.987190 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53bb6966f44b9b294ac4adfa805bd6728cefe0a8830e9c1589573d39761273d6" Nov 25 13:22:31 crc kubenswrapper[4675]: I1125 13:22:31.987257 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.958886 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959712 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="extract-content" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959725 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="extract-content" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959738 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="extract-utilities" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959744 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="extract-utilities" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959760 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959767 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959782 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="extract-content" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959787 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="extract-content" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959801 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2beee9f5-8487-4f64-a55c-11f32c68c5fc" containerName="tempest-tests-tempest-tests-runner" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959807 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2beee9f5-8487-4f64-a55c-11f32c68c5fc" containerName="tempest-tests-tempest-tests-runner" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959834 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959841 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: E1125 13:22:41.959866 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="extract-utilities" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.959873 4675 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="extract-utilities" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.960102 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="35a7202a-5f2a-42a7-af32-962de311245c" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.960123 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c280461-76a3-4bb9-9fa6-59fc742e8596" containerName="registry-server" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.960141 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="2beee9f5-8487-4f64-a55c-11f32c68c5fc" containerName="tempest-tests-tempest-tests-runner" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.960847 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.963078 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-pfpzw" Nov 25 13:22:41 crc kubenswrapper[4675]: I1125 13:22:41.967963 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.022847 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.022886 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmhbq\" (UniqueName: \"kubernetes.io/projected/ac759df6-989d-47da-9259-b6d00e9e566e-kube-api-access-mmhbq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.125190 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.125520 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmhbq\" (UniqueName: \"kubernetes.io/projected/ac759df6-989d-47da-9259-b6d00e9e566e-kube-api-access-mmhbq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.125683 4675 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 13:22:42 crc kubenswrapper[4675]: 
Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.145946 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmhbq\" (UniqueName: \"kubernetes.io/projected/ac759df6-989d-47da-9259-b6d00e9e566e-kube-api-access-mmhbq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.157917 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ac759df6-989d-47da-9259-b6d00e9e566e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.284856 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 13:22:42 crc kubenswrapper[4675]: I1125 13:22:42.895080 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 13:22:42 crc kubenswrapper[4675]: W1125 13:22:42.902961 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac759df6_989d_47da_9259_b6d00e9e566e.slice/crio-f73963f2ab28a65fa1db66229e1e27dc6fab6c384b04365d3f9ad680be698132 WatchSource:0}: Error finding container f73963f2ab28a65fa1db66229e1e27dc6fab6c384b04365d3f9ad680be698132: Status 404 returned error can't find the container with id f73963f2ab28a65fa1db66229e1e27dc6fab6c384b04365d3f9ad680be698132
Nov 25 13:22:43 crc kubenswrapper[4675]: I1125 13:22:43.076443 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"ac759df6-989d-47da-9259-b6d00e9e566e","Type":"ContainerStarted","Data":"f73963f2ab28a65fa1db66229e1e27dc6fab6c384b04365d3f9ad680be698132"}
Nov 25 13:22:45 crc kubenswrapper[4675]: I1125 13:22:45.094088 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"ac759df6-989d-47da-9259-b6d00e9e566e","Type":"ContainerStarted","Data":"e744e58c14f358d788dd78eccef366633794f9bab24c3a3a1d2e1099cf925fae"}
Nov 25 13:22:45 crc kubenswrapper[4675]: I1125 13:22:45.114565 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=3.110665854 podStartE2EDuration="4.114545222s" podCreationTimestamp="2025-11-25 13:22:41 +0000 UTC" firstStartedPulling="2025-11-25 13:22:42.90667091 +0000 UTC m=+3308.078263251" lastFinishedPulling="2025-11-25 13:22:43.910550278 +0000 UTC m=+3309.082142619" observedRunningTime="2025-11-25 13:22:45.10620512 +0000 UTC m=+3310.277797481" watchObservedRunningTime="2025-11-25 13:22:45.114545222 +0000 UTC m=+3310.286137553"
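[editor's note: the "Observed pod startup duration" entry above encodes two figures: podStartE2EDuration is observedRunningTime minus podCreationTimestamp (13:22:45.114545222 - 13:22:41 = 4.114545222s), and podStartSLOduration additionally subtracts the image-pull window (13:22:43.910550278 - 13:22:42.90667091 = 1.003879368s), giving 3.110665854s. A small sketch reproducing that arithmetic from the entry's own timestamps:]

    // Sketch: reproducing the two startup-latency figures above.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2025-11-25 13:22:41 +0000 UTC")
        running := parse("2025-11-25 13:22:45.114545222 +0000 UTC")
        pullStart := parse("2025-11-25 13:22:42.90667091 +0000 UTC")
        pullEnd := parse("2025-11-25 13:22:43.910550278 +0000 UTC")

        e2e := running.Sub(created)         // podStartE2EDuration: 4.114545222s
        slo := e2e - pullEnd.Sub(pullStart) // podStartSLOduration: 3.110665854s
        fmt.Println(e2e, slo)
    }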
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.092009 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6smlh/must-gather-kbvj9"]
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.124706 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.134626 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6smlh/must-gather-kbvj9"]
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.135023 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.135145 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw2pb\" (UniqueName: \"kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.138258 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6smlh"/"kube-root-ca.crt"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.138465 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6smlh"/"default-dockercfg-7lx52"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.138605 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6smlh"/"openshift-service-ca.crt"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.237909 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.238027 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw2pb\" (UniqueName: \"kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.239079 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.275214 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw2pb\" (UniqueName: \"kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb\") pod \"must-gather-kbvj9\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:07 crc kubenswrapper[4675]: I1125 13:23:07.472459 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/must-gather-kbvj9"
Nov 25 13:23:08 crc kubenswrapper[4675]: I1125 13:23:07.966056 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6smlh/must-gather-kbvj9"]
Nov 25 13:23:08 crc kubenswrapper[4675]: I1125 13:23:08.301170 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/must-gather-kbvj9" event={"ID":"3697c881-eee5-4d41-856b-8a8064d1cf28","Type":"ContainerStarted","Data":"c77576a4e743130788480a6e260f5b33f8b743422a0232819257f8a66dd28bee"}
Nov 25 13:23:12 crc kubenswrapper[4675]: I1125 13:23:12.341991 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/must-gather-kbvj9" event={"ID":"3697c881-eee5-4d41-856b-8a8064d1cf28","Type":"ContainerStarted","Data":"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97"}
Nov 25 13:23:13 crc kubenswrapper[4675]: I1125 13:23:13.352249 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/must-gather-kbvj9" event={"ID":"3697c881-eee5-4d41-856b-8a8064d1cf28","Type":"ContainerStarted","Data":"86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b"}
Nov 25 13:23:13 crc kubenswrapper[4675]: I1125 13:23:13.367681 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6smlh/must-gather-kbvj9" podStartSLOduration=3.4126856 podStartE2EDuration="7.367661948s" podCreationTimestamp="2025-11-25 13:23:06 +0000 UTC" firstStartedPulling="2025-11-25 13:23:07.9850353 +0000 UTC m=+3333.156627651" lastFinishedPulling="2025-11-25 13:23:11.940011658 +0000 UTC m=+3337.111603999" observedRunningTime="2025-11-25 13:23:13.365958744 +0000 UTC m=+3338.537551085" watchObservedRunningTime="2025-11-25 13:23:13.367661948 +0000 UTC m=+3338.539254299"
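[editor's note: lifecycle events like those above follow a fixed klog shape ("SyncLoop (PLEG): event for pod" with a JSON-like event payload), which makes the whole artifact mechanically searchable. A hedged sketch of one way to pull pod / event-type / container-ID triples out of such lines; the regular expression is my own, not anything the kubelet ships:]

    // Sketch: extracting PLEG lifecycle events from kubelet log lines
    // fed on stdin (e.g. from this file).
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    var pleg = regexp.MustCompile(
        `SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journald lines can be long
        for sc.Scan() {
            if m := pleg.FindStringSubmatch(sc.Text()); m != nil {
                // pod, event type (ContainerStarted/ContainerDied), container or sandbox ID
                fmt.Printf("%s\t%s\t%s\n", m[1], m[3], m[4])
            }
        }
    }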
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.445849 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6smlh/crc-debug-64vmg"]
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.447481 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.520722 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97j82\" (UniqueName: \"kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.520773 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.622964 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.623025 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97j82\" (UniqueName: \"kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.623068 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.661175 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97j82\" (UniqueName: \"kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82\") pod \"crc-debug-64vmg\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:16 crc kubenswrapper[4675]: I1125 13:23:16.768197 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-64vmg"
Nov 25 13:23:17 crc kubenswrapper[4675]: I1125 13:23:17.398027 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-64vmg" event={"ID":"cffcd74c-74ad-4719-84f4-c8ed620abb08","Type":"ContainerStarted","Data":"638db2327ddcc55f1e1c6970dfb89efd3a26f4cbb116bea0787387f8fc011038"}
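[editor's note: crc-debug-64vmg is the node-debug pod that must-gather drives; its only volumes are the projected service-account token and a hostPath volume named "host", which hands the container the node's filesystem. A hedged sketch of the shape of such a pod as Go structs; the hostPath volume name comes from the mount entries above, while the container name, image, and /host mount path are assumptions in the style of what `oc debug` creates:]

    // Sketch: the shape of a node-debug pod like crc-debug-64vmg.
    package debug

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func debugPod() *corev1.Pod {
        return &corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "crc-debug-64vmg",
                Namespace: "openshift-must-gather-6smlh",
            },
            Spec: corev1.PodSpec{
                NodeName: "crc",
                Volumes: []corev1.Volume{{
                    Name: "host", // the hostPath volume mounted above
                    VolumeSource: corev1.VolumeSource{
                        HostPath: &corev1.HostPathVolumeSource{Path: "/"},
                    },
                }},
                Containers: []corev1.Container{{
                    Name:  "container-00",           // assumed name
                    Image: "registry.example/tools", // hypothetical image
                    VolumeMounts: []corev1.VolumeMount{{
                        Name:      "host",
                        MountPath: "/host", // assumed mount point
                    }},
                }},
            },
        }
    }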
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.234049 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.237960 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.250485 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.346897 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.347000 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.348363 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4h9n\" (UniqueName: \"kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.452014 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4h9n\" (UniqueName: \"kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.452143 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.452189 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.452713 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.453894 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.480251 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4h9n\" (UniqueName: \"kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n\") pod \"community-operators-mmlc8\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") " pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.555020 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-64vmg" event={"ID":"cffcd74c-74ad-4719-84f4-c8ed620abb08","Type":"ContainerStarted","Data":"6b1bcca55f5671fbc668dc543e75423397cc034c045fdb5bc05e4b59c80bfbb5"}
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.579299 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6smlh/crc-debug-64vmg" podStartSLOduration=1.6273543579999998 podStartE2EDuration="14.57927617s" podCreationTimestamp="2025-11-25 13:23:16 +0000 UTC" firstStartedPulling="2025-11-25 13:23:16.818008412 +0000 UTC m=+3341.989600753" lastFinishedPulling="2025-11-25 13:23:29.769930224 +0000 UTC m=+3354.941522565" observedRunningTime="2025-11-25 13:23:30.574696707 +0000 UTC m=+3355.746289058" watchObservedRunningTime="2025-11-25 13:23:30.57927617 +0000 UTC m=+3355.750868511"
Nov 25 13:23:30 crc kubenswrapper[4675]: I1125 13:23:30.624043 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:31 crc kubenswrapper[4675]: I1125 13:23:31.131167 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:23:31 crc kubenswrapper[4675]: I1125 13:23:31.575152 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerStarted","Data":"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"}
Nov 25 13:23:31 crc kubenswrapper[4675]: I1125 13:23:31.575405 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerStarted","Data":"b3cc39e39a06e3aae3851de995804958e2cd4f03e80deea96205ecf266f0ef4a"}
Nov 25 13:23:32 crc kubenswrapper[4675]: I1125 13:23:32.587768 4675 generic.go:334] "Generic (PLEG): container finished" podID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerID="8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1" exitCode=0
Nov 25 13:23:32 crc kubenswrapper[4675]: I1125 13:23:32.587867 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerDied","Data":"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"}
Nov 25 13:23:34 crc kubenswrapper[4675]: I1125 13:23:34.612756 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerStarted","Data":"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"}
Nov 25 13:23:37 crc kubenswrapper[4675]: I1125 13:23:37.640137 4675 generic.go:334] "Generic (PLEG): container finished" podID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerID="bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd" exitCode=0
Nov 25 13:23:37 crc kubenswrapper[4675]: I1125 13:23:37.640226 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerDied","Data":"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"}
Nov 25 13:23:43 crc kubenswrapper[4675]: I1125 13:23:43.694996 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerStarted","Data":"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"}
Nov 25 13:23:43 crc kubenswrapper[4675]: I1125 13:23:43.717352 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mmlc8" podStartSLOduration=3.462488587 podStartE2EDuration="13.717331877s" podCreationTimestamp="2025-11-25 13:23:30 +0000 UTC" firstStartedPulling="2025-11-25 13:23:32.59037974 +0000 UTC m=+3357.761972091" lastFinishedPulling="2025-11-25 13:23:42.84522304 +0000 UTC m=+3368.016815381" observedRunningTime="2025-11-25 13:23:43.713731934 +0000 UTC m=+3368.885324275" watchObservedRunningTime="2025-11-25 13:23:43.717331877 +0000 UTC m=+3368.888924238"
Nov 25 13:23:50 crc kubenswrapper[4675]: I1125 13:23:50.624427 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:50 crc kubenswrapper[4675]: I1125 13:23:50.625059 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:23:51 crc kubenswrapper[4675]: I1125 13:23:51.676179 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-mmlc8" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="registry-server" probeResult="failure" output=<
Nov 25 13:23:51 crc kubenswrapper[4675]: 	timeout: failed to connect service ":50051" within 1s
Nov 25 13:23:51 crc kubenswrapper[4675]: >
Nov 25 13:24:00 crc kubenswrapper[4675]: I1125 13:24:00.679413 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:24:00 crc kubenswrapper[4675]: I1125 13:24:00.731532 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:24:01 crc kubenswrapper[4675]: I1125 13:24:01.438237 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:24:01 crc kubenswrapper[4675]: I1125 13:24:01.861059 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mmlc8" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="registry-server" containerID="cri-o://4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf" gracePeriod=2
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.358088 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.454730 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4h9n\" (UniqueName: \"kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n\") pod \"816c2252-ae55-4c5a-aab4-a4a2b804101f\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") "
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.454844 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content\") pod \"816c2252-ae55-4c5a-aab4-a4a2b804101f\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") "
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.454883 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities\") pod \"816c2252-ae55-4c5a-aab4-a4a2b804101f\" (UID: \"816c2252-ae55-4c5a-aab4-a4a2b804101f\") "
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.455978 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities" (OuterVolumeSpecName: "utilities") pod "816c2252-ae55-4c5a-aab4-a4a2b804101f" (UID: "816c2252-ae55-4c5a-aab4-a4a2b804101f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.461858 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n" (OuterVolumeSpecName: "kube-api-access-w4h9n") pod "816c2252-ae55-4c5a-aab4-a4a2b804101f" (UID: "816c2252-ae55-4c5a-aab4-a4a2b804101f"). InnerVolumeSpecName "kube-api-access-w4h9n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.514126 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "816c2252-ae55-4c5a-aab4-a4a2b804101f" (UID: "816c2252-ae55-4c5a-aab4-a4a2b804101f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.556592 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4h9n\" (UniqueName: \"kubernetes.io/projected/816c2252-ae55-4c5a-aab4-a4a2b804101f-kube-api-access-w4h9n\") on node \"crc\" DevicePath \"\""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.556624 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.556635 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/816c2252-ae55-4c5a-aab4-a4a2b804101f-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.874668 4675 generic.go:334] "Generic (PLEG): container finished" podID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerID="4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf" exitCode=0
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.874727 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerDied","Data":"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"}
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.874760 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmlc8"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.874765 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmlc8" event={"ID":"816c2252-ae55-4c5a-aab4-a4a2b804101f","Type":"ContainerDied","Data":"b3cc39e39a06e3aae3851de995804958e2cd4f03e80deea96205ecf266f0ef4a"}
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.874797 4675 scope.go:117] "RemoveContainer" containerID="4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.915030 4675 scope.go:117] "RemoveContainer" containerID="bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.926870 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.934036 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mmlc8"]
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.944669 4675 scope.go:117] "RemoveContainer" containerID="8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.991059 4675 scope.go:117] "RemoveContainer" containerID="4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"
Nov 25 13:24:02 crc kubenswrapper[4675]: E1125 13:24:02.992331 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf\": container with ID starting with 4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf not found: ID does not exist" containerID="4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.992369 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf"} err="failed to get container status \"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf\": rpc error: code = NotFound desc = could not find container \"4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf\": container with ID starting with 4e403e41dc1b0664b4edb327156453526fbeb21d1b2f545fb703876fe30a2adf not found: ID does not exist"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.992392 4675 scope.go:117] "RemoveContainer" containerID="bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"
Nov 25 13:24:02 crc kubenswrapper[4675]: E1125 13:24:02.992687 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd\": container with ID starting with bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd not found: ID does not exist" containerID="bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.992718 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd"} err="failed to get container status \"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd\": rpc error: code = NotFound desc = could not find container \"bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd\": container with ID starting with bdd1bbbf2f3001847c0662412dc1c96f78a72fa0a163d3a513b494170c09fabd not found: ID does not exist"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.992736 4675 scope.go:117] "RemoveContainer" containerID="8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"
Nov 25 13:24:02 crc kubenswrapper[4675]: E1125 13:24:02.993247 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1\": container with ID starting with 8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1 not found: ID does not exist" containerID="8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"
Nov 25 13:24:02 crc kubenswrapper[4675]: I1125 13:24:02.993273 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1"} err="failed to get container status \"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1\": rpc error: code = NotFound desc = could not find container \"8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1\": container with ID starting with 8054b0f8c5ecded6a574013c877239cd4bd3dafcd683dd12601041c39d7109d1 not found: ID does not exist"
Nov 25 13:24:03 crc kubenswrapper[4675]: I1125 13:24:03.546010 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" path="/var/lib/kubelet/pods/816c2252-ae55-4c5a-aab4-a4a2b804101f/volumes"
Nov 25 13:24:16 crc kubenswrapper[4675]: I1125 13:24:16.012407 4675 generic.go:334] "Generic (PLEG): container finished" podID="cffcd74c-74ad-4719-84f4-c8ed620abb08" containerID="6b1bcca55f5671fbc668dc543e75423397cc034c045fdb5bc05e4b59c80bfbb5" exitCode=0
Nov 25 13:24:16 crc kubenswrapper[4675]:
I1125 13:24:16.012517 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-64vmg" event={"ID":"cffcd74c-74ad-4719-84f4-c8ed620abb08","Type":"ContainerDied","Data":"6b1bcca55f5671fbc668dc543e75423397cc034c045fdb5bc05e4b59c80bfbb5"} Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.118409 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-64vmg" Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.153860 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-64vmg"] Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.163005 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-64vmg"] Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.238014 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97j82\" (UniqueName: \"kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82\") pod \"cffcd74c-74ad-4719-84f4-c8ed620abb08\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.238076 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host\") pod \"cffcd74c-74ad-4719-84f4-c8ed620abb08\" (UID: \"cffcd74c-74ad-4719-84f4-c8ed620abb08\") " Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.238505 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host" (OuterVolumeSpecName: "host") pod "cffcd74c-74ad-4719-84f4-c8ed620abb08" (UID: "cffcd74c-74ad-4719-84f4-c8ed620abb08"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.244582 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82" (OuterVolumeSpecName: "kube-api-access-97j82") pod "cffcd74c-74ad-4719-84f4-c8ed620abb08" (UID: "cffcd74c-74ad-4719-84f4-c8ed620abb08"). InnerVolumeSpecName "kube-api-access-97j82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.340604 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97j82\" (UniqueName: \"kubernetes.io/projected/cffcd74c-74ad-4719-84f4-c8ed620abb08-kube-api-access-97j82\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.340974 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cffcd74c-74ad-4719-84f4-c8ed620abb08-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:17 crc kubenswrapper[4675]: I1125 13:24:17.543875 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffcd74c-74ad-4719-84f4-c8ed620abb08" path="/var/lib/kubelet/pods/cffcd74c-74ad-4719-84f4-c8ed620abb08/volumes" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.031694 4675 scope.go:117] "RemoveContainer" containerID="6b1bcca55f5671fbc668dc543e75423397cc034c045fdb5bc05e4b59c80bfbb5" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.031949 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-64vmg" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.324087 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6smlh/crc-debug-kbxwh"] Nov 25 13:24:18 crc kubenswrapper[4675]: E1125 13:24:18.325336 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="extract-content" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.325404 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="extract-content" Nov 25 13:24:18 crc kubenswrapper[4675]: E1125 13:24:18.325473 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffcd74c-74ad-4719-84f4-c8ed620abb08" containerName="container-00" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.325528 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffcd74c-74ad-4719-84f4-c8ed620abb08" containerName="container-00" Nov 25 13:24:18 crc kubenswrapper[4675]: E1125 13:24:18.325599 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="registry-server" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.325648 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="registry-server" Nov 25 13:24:18 crc kubenswrapper[4675]: E1125 13:24:18.325704 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="extract-utilities" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.325752 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="extract-utilities" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.326033 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="816c2252-ae55-4c5a-aab4-a4a2b804101f" containerName="registry-server" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.326113 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffcd74c-74ad-4719-84f4-c8ed620abb08" containerName="container-00" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.326742 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.359179 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.359540 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j6tg\" (UniqueName: \"kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.460961 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.461272 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j6tg\" (UniqueName: \"kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.461048 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.478863 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j6tg\" (UniqueName: \"kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg\") pod \"crc-debug-kbxwh\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: I1125 13:24:18.642671 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:18 crc kubenswrapper[4675]: W1125 13:24:18.682001 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod490e39cd_96d5_46d9_848d_5217e9b20070.slice/crio-21de01aee1e39cfbdd1913bcad782e7b56d9222cb45b0cf237560083f8798846 WatchSource:0}: Error finding container 21de01aee1e39cfbdd1913bcad782e7b56d9222cb45b0cf237560083f8798846: Status 404 returned error can't find the container with id 21de01aee1e39cfbdd1913bcad782e7b56d9222cb45b0cf237560083f8798846 Nov 25 13:24:19 crc kubenswrapper[4675]: I1125 13:24:19.040915 4675 generic.go:334] "Generic (PLEG): container finished" podID="490e39cd-96d5-46d9-848d-5217e9b20070" containerID="3fd443a62e459237191c1be98249a9ea7983c92015ea51fc77ff11bdf6d185de" exitCode=0 Nov 25 13:24:19 crc kubenswrapper[4675]: I1125 13:24:19.040994 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" event={"ID":"490e39cd-96d5-46d9-848d-5217e9b20070","Type":"ContainerDied","Data":"3fd443a62e459237191c1be98249a9ea7983c92015ea51fc77ff11bdf6d185de"} Nov 25 13:24:19 crc kubenswrapper[4675]: I1125 13:24:19.041346 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" event={"ID":"490e39cd-96d5-46d9-848d-5217e9b20070","Type":"ContainerStarted","Data":"21de01aee1e39cfbdd1913bcad782e7b56d9222cb45b0cf237560083f8798846"} Nov 25 13:24:19 crc kubenswrapper[4675]: I1125 13:24:19.477543 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-kbxwh"] Nov 25 13:24:19 crc kubenswrapper[4675]: I1125 13:24:19.486497 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-kbxwh"] Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.159971 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.338577 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host\") pod \"490e39cd-96d5-46d9-848d-5217e9b20070\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.338695 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host" (OuterVolumeSpecName: "host") pod "490e39cd-96d5-46d9-848d-5217e9b20070" (UID: "490e39cd-96d5-46d9-848d-5217e9b20070"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.338909 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7j6tg\" (UniqueName: \"kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg\") pod \"490e39cd-96d5-46d9-848d-5217e9b20070\" (UID: \"490e39cd-96d5-46d9-848d-5217e9b20070\") " Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.339528 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/490e39cd-96d5-46d9-848d-5217e9b20070-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.344432 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg" (OuterVolumeSpecName: "kube-api-access-7j6tg") pod "490e39cd-96d5-46d9-848d-5217e9b20070" (UID: "490e39cd-96d5-46d9-848d-5217e9b20070"). InnerVolumeSpecName "kube-api-access-7j6tg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.441534 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7j6tg\" (UniqueName: \"kubernetes.io/projected/490e39cd-96d5-46d9-848d-5217e9b20070-kube-api-access-7j6tg\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.632044 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6smlh/crc-debug-b5lmw"] Nov 25 13:24:20 crc kubenswrapper[4675]: E1125 13:24:20.632440 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="490e39cd-96d5-46d9-848d-5217e9b20070" containerName="container-00" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.632453 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="490e39cd-96d5-46d9-848d-5217e9b20070" containerName="container-00" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.632623 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="490e39cd-96d5-46d9-848d-5217e9b20070" containerName="container-00" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.633255 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.644545 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8v96\" (UniqueName: \"kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.644648 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.746440 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8v96\" (UniqueName: \"kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.746499 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.746669 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.769111 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8v96\" (UniqueName: \"kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96\") pod \"crc-debug-b5lmw\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.940030 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.944717 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.949379 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:20 crc kubenswrapper[4675]: I1125 13:24:20.956391 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.052848 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.052903 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849tp\" (UniqueName: \"kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.053025 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.062283 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-kbxwh" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.062332 4675 scope.go:117] "RemoveContainer" containerID="3fd443a62e459237191c1be98249a9ea7983c92015ea51fc77ff11bdf6d185de" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.064990 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" event={"ID":"18415355-82ef-43e5-b0ac-feb493e7d00e","Type":"ContainerStarted","Data":"1a078be7efdfa27447682c47189a212c6417045e7f45eec5446b4f9775f5e8c6"} Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.154140 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.154186 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849tp\" (UniqueName: \"kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.154309 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.154766 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.154826 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.173465 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849tp\" (UniqueName: \"kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp\") pod \"redhat-operators-9tvf7\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.273787 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.549653 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="490e39cd-96d5-46d9-848d-5217e9b20070" path="/var/lib/kubelet/pods/490e39cd-96d5-46d9-848d-5217e9b20070/volumes" Nov 25 13:24:21 crc kubenswrapper[4675]: I1125 13:24:21.790718 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.075212 4675 generic.go:334] "Generic (PLEG): container finished" podID="18415355-82ef-43e5-b0ac-feb493e7d00e" containerID="95ad30beaa4bf3082c13048390af5fcfde597e3a84bcd26906e318a114a115c2" exitCode=0 Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.075295 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" event={"ID":"18415355-82ef-43e5-b0ac-feb493e7d00e","Type":"ContainerDied","Data":"95ad30beaa4bf3082c13048390af5fcfde597e3a84bcd26906e318a114a115c2"} Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.077217 4675 generic.go:334] "Generic (PLEG): container finished" podID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerID="71e92723f6fa92ebfe8bc489215e80a0dbfe904f24497af35dc292963f17dc5f" exitCode=0 Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.077263 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerDied","Data":"71e92723f6fa92ebfe8bc489215e80a0dbfe904f24497af35dc292963f17dc5f"} Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.077292 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerStarted","Data":"c9035f464d3810a6ac977c27640bee1613607acdfcc4fe2774a64303e70f3439"} Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.155652 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-b5lmw"] Nov 25 13:24:22 crc kubenswrapper[4675]: I1125 13:24:22.164066 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6smlh/crc-debug-b5lmw"] Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.089336 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerStarted","Data":"fd6441701b1fba33bc4d4b66ecaa7e01f8c51c94d35b39521e3ef7139a15df9c"} Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.203152 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.396491 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host\") pod \"18415355-82ef-43e5-b0ac-feb493e7d00e\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.396634 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host" (OuterVolumeSpecName: "host") pod "18415355-82ef-43e5-b0ac-feb493e7d00e" (UID: "18415355-82ef-43e5-b0ac-feb493e7d00e"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.397107 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8v96\" (UniqueName: \"kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96\") pod \"18415355-82ef-43e5-b0ac-feb493e7d00e\" (UID: \"18415355-82ef-43e5-b0ac-feb493e7d00e\") " Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.397632 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18415355-82ef-43e5-b0ac-feb493e7d00e-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.404332 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96" (OuterVolumeSpecName: "kube-api-access-g8v96") pod "18415355-82ef-43e5-b0ac-feb493e7d00e" (UID: "18415355-82ef-43e5-b0ac-feb493e7d00e"). InnerVolumeSpecName "kube-api-access-g8v96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.498348 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8v96\" (UniqueName: \"kubernetes.io/projected/18415355-82ef-43e5-b0ac-feb493e7d00e-kube-api-access-g8v96\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:23 crc kubenswrapper[4675]: I1125 13:24:23.543571 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18415355-82ef-43e5-b0ac-feb493e7d00e" path="/var/lib/kubelet/pods/18415355-82ef-43e5-b0ac-feb493e7d00e/volumes" Nov 25 13:24:24 crc kubenswrapper[4675]: I1125 13:24:24.099890 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6smlh/crc-debug-b5lmw" Nov 25 13:24:24 crc kubenswrapper[4675]: I1125 13:24:24.099947 4675 scope.go:117] "RemoveContainer" containerID="95ad30beaa4bf3082c13048390af5fcfde597e3a84bcd26906e318a114a115c2" Nov 25 13:24:28 crc kubenswrapper[4675]: I1125 13:24:28.145886 4675 generic.go:334] "Generic (PLEG): container finished" podID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerID="fd6441701b1fba33bc4d4b66ecaa7e01f8c51c94d35b39521e3ef7139a15df9c" exitCode=0 Nov 25 13:24:28 crc kubenswrapper[4675]: I1125 13:24:28.145958 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerDied","Data":"fd6441701b1fba33bc4d4b66ecaa7e01f8c51c94d35b39521e3ef7139a15df9c"} Nov 25 13:24:29 crc kubenswrapper[4675]: I1125 13:24:29.158849 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerStarted","Data":"5b3d4836ce2117be43de4f8cb5029a566a0cfeb94661a3c5385fb8438467023b"} Nov 25 13:24:29 crc kubenswrapper[4675]: I1125 13:24:29.181315 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9tvf7" podStartSLOduration=2.7339853549999997 podStartE2EDuration="9.18128118s" podCreationTimestamp="2025-11-25 13:24:20 +0000 UTC" firstStartedPulling="2025-11-25 13:24:22.079891317 +0000 UTC m=+3407.251483658" lastFinishedPulling="2025-11-25 13:24:28.527187142 +0000 UTC m=+3413.698779483" observedRunningTime="2025-11-25 13:24:29.1784195 +0000 UTC m=+3414.350011841" watchObservedRunningTime="2025-11-25 13:24:29.18128118 +0000 UTC m=+3414.352873521" Nov 25 13:24:31 crc kubenswrapper[4675]: I1125 13:24:31.274056 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:31 crc kubenswrapper[4675]: I1125 13:24:31.274381 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:32 crc kubenswrapper[4675]: I1125 13:24:32.332724 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9tvf7" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="registry-server" probeResult="failure" output=< Nov 25 13:24:32 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:24:32 crc kubenswrapper[4675]: > Nov 25 13:24:40 crc kubenswrapper[4675]: I1125 13:24:40.656305 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ff8694d7d-s9pzw_51325415-d3b2-4852-bdce-6861cd1dc391/barbican-api/0.log" Nov 25 13:24:40 crc kubenswrapper[4675]: I1125 13:24:40.968971 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-858b4645fd-rr59w_a4640f4e-98fe-438c-bc12-38c11c62f997/barbican-keystone-listener/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.224834 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-858b4645fd-rr59w_a4640f4e-98fe-438c-bc12-38c11c62f997/barbican-keystone-listener-log/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.241630 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f59d94c69-6bgtx_654bfacb-d4b4-45a3-ae90-92496e1b5e9e/barbican-worker/0.log" Nov 25 13:24:41 crc 
kubenswrapper[4675]: I1125 13:24:41.351357 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.407348 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.597936 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.617852 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ff8694d7d-s9pzw_51325415-d3b2-4852-bdce-6861cd1dc391/barbican-api-log/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.680792 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f59d94c69-6bgtx_654bfacb-d4b4-45a3-ae90-92496e1b5e9e/barbican-worker-log/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.731134 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f_3f838da8-c090-474d-826e-592b92857777/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.903744 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/ceilometer-central-agent/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.949708 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/ceilometer-notification-agent/0.log" Nov 25 13:24:41 crc kubenswrapper[4675]: I1125 13:24:41.986510 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/proxy-httpd/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.088799 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/sg-core/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.244738 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_90a143ed-4c09-4ac7-8dd9-869c15e9ef3c/cinder-api/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.307860 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_90a143ed-4c09-4ac7-8dd9-869c15e9ef3c/cinder-api-log/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.459984 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46c62718-04bb-45f3-bd9c-08957f1241a7/probe/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.530573 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46c62718-04bb-45f3-bd9c-08957f1241a7/cinder-scheduler/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.636287 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb_17c7ad50-1a29-478e-b1df-a0084c3142df/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:42 crc kubenswrapper[4675]: I1125 13:24:42.805478 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5_3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:42 crc 
kubenswrapper[4675]: I1125 13:24:42.931717 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/init/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.112838 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/init/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.163537 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/dnsmasq-dns/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.233288 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-6stz8_25fb1275-2632-4271-b41e-909adabbdf27/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.278342 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9tvf7" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="registry-server" containerID="cri-o://5b3d4836ce2117be43de4f8cb5029a566a0cfeb94661a3c5385fb8438467023b" gracePeriod=2 Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.364330 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5d10980d-6f3c-4a3f-a4ce-30e07d985393/glance-log/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.371084 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5d10980d-6f3c-4a3f-a4ce-30e07d985393/glance-httpd/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.640508 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d478f76e-2629-426c-8a29-60b4cce437f2/glance-httpd/0.log" Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.664194 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:24:43 crc kubenswrapper[4675]: I1125 13:24:43.664239 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.247854 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d478f76e-2629-426c-8a29-60b4cce437f2/glance-log/0.log" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.318048 4675 generic.go:334] "Generic (PLEG): container finished" podID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerID="5b3d4836ce2117be43de4f8cb5029a566a0cfeb94661a3c5385fb8438467023b" exitCode=0 Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.318422 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerDied","Data":"5b3d4836ce2117be43de4f8cb5029a566a0cfeb94661a3c5385fb8438467023b"} Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 
13:24:44.318455 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9tvf7" event={"ID":"fccf422b-ba28-4570-81ea-a8f4943fd51e","Type":"ContainerDied","Data":"c9035f464d3810a6ac977c27640bee1613607acdfcc4fe2774a64303e70f3439"} Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.318468 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9035f464d3810a6ac977c27640bee1613607acdfcc4fe2774a64303e70f3439" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.332417 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon/2.log" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.389808 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.555505 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-849tp\" (UniqueName: \"kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp\") pod \"fccf422b-ba28-4570-81ea-a8f4943fd51e\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.555687 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content\") pod \"fccf422b-ba28-4570-81ea-a8f4943fd51e\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.555756 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities\") pod \"fccf422b-ba28-4570-81ea-a8f4943fd51e\" (UID: \"fccf422b-ba28-4570-81ea-a8f4943fd51e\") " Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.557408 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities" (OuterVolumeSpecName: "utilities") pod "fccf422b-ba28-4570-81ea-a8f4943fd51e" (UID: "fccf422b-ba28-4570-81ea-a8f4943fd51e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.576825 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp" (OuterVolumeSpecName: "kube-api-access-849tp") pod "fccf422b-ba28-4570-81ea-a8f4943fd51e" (UID: "fccf422b-ba28-4570-81ea-a8f4943fd51e"). InnerVolumeSpecName "kube-api-access-849tp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.598024 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon/1.log" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.658431 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-849tp\" (UniqueName: \"kubernetes.io/projected/fccf422b-ba28-4570-81ea-a8f4943fd51e-kube-api-access-849tp\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.658702 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.700984 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fccf422b-ba28-4570-81ea-a8f4943fd51e" (UID: "fccf422b-ba28-4570-81ea-a8f4943fd51e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.759425 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fccf422b-ba28-4570-81ea-a8f4943fd51e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.793724 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-c46tf_b04ef6a2-5500-4d9d-87ad-1ec2762a5a46/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:44 crc kubenswrapper[4675]: I1125 13:24:44.927304 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon-log/0.log" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.123874 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-vgqfd_21e88661-854c-481d-b024-c7c87ea9373a/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.325568 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9tvf7" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.383673 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.403328 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9tvf7"] Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.451284 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-686987849d-794s5_ef21ec22-c1e4-490d-b59c-8ffec71be972/keystone-api/0.log" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.472878 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401261-blc64_b1779f11-7333-4094-a1ee-b509cc09da52/keystone-cron/0.log" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.545402 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" path="/var/lib/kubelet/pods/fccf422b-ba28-4570-81ea-a8f4943fd51e/volumes" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.554881 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_76b55738-1ee0-41a4-950a-faa08432f67f/kube-state-metrics/0.log" Nov 25 13:24:45 crc kubenswrapper[4675]: I1125 13:24:45.702223 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr_c466bd75-6cb4-452f-a4fa-d9a5dbec6840/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.062254 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6c6c8d8969-kqpxz_a69ab647-c53a-4fcc-86f0-d92a9eebf587/neutron-httpd/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.080253 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6c6c8d8969-kqpxz_a69ab647-c53a-4fcc-86f0-d92a9eebf587/neutron-api/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.278578 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg_4ee951ab-e497-4274-9251-85c92c498b0e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.538360 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cf087269-8e7f-416e-9492-b3ccb72f40d0/nova-api-log/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.784515 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_98ffb1d8-055d-41be-8c54-7282e6e1c36d/nova-cell0-conductor-conductor/0.log" Nov 25 13:24:46 crc kubenswrapper[4675]: I1125 13:24:46.793976 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cf087269-8e7f-416e-9492-b3ccb72f40d0/nova-api-api/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.053025 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_02b15b4e-fe03-46fb-9a34-c4e496129490/nova-cell1-conductor-conductor/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.160421 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_10485665-29b9-4a6f-ac17-3cca271b761d/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.429870 4675 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-l9q4l_4c8ba7b5-22cd-44f4-9389-1f352f9a2368/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.640127 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_f3cab755-3df5-4cfb-880e-f842da175aeb/nova-metadata-log/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.758154 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_431204a3-00f5-425a-b473-86f86e2bc600/nova-scheduler-scheduler/0.log" Nov 25 13:24:47 crc kubenswrapper[4675]: I1125 13:24:47.972078 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/mysql-bootstrap/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.202399 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/mysql-bootstrap/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.344196 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/galera/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.455595 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/mysql-bootstrap/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.614949 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_f3cab755-3df5-4cfb-880e-f842da175aeb/nova-metadata-metadata/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.719069 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/galera/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.791298 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/mysql-bootstrap/0.log" Nov 25 13:24:48 crc kubenswrapper[4675]: I1125 13:24:48.935447 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_4305dd31-2399-4e02-8b99-224a616e8c8c/openstackclient/0.log" Nov 25 13:24:49 crc kubenswrapper[4675]: I1125 13:24:49.024304 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-68qlk_6ef75e87-29e7-4d11-9547-430df2247d7b/openstack-network-exporter/0.log" Nov 25 13:24:49 crc kubenswrapper[4675]: I1125 13:24:49.232072 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server-init/0.log" Nov 25 13:24:49 crc kubenswrapper[4675]: I1125 13:24:49.780964 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server/0.log" Nov 25 13:24:49 crc kubenswrapper[4675]: I1125 13:24:49.792225 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server-init/0.log" Nov 25 13:24:49 crc kubenswrapper[4675]: I1125 13:24:49.806999 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovs-vswitchd/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.146712 4675 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vq5tr_5f3381e9-49d9-47ea-87dd-86442bf3394a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.159597 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-vgv4n_bd0994da-34e6-4f4c-b8a5-cae4c7923df7/ovn-controller/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.475005 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d2d15dcc-e29e-4b04-8de0-911cc8190e33/openstack-network-exporter/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.484642 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d2d15dcc-e29e-4b04-8de0-911cc8190e33/ovn-northd/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.545674 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c215d8eb-d320-4245-8bdb-73b0d600ea49/openstack-network-exporter/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.735536 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c215d8eb-d320-4245-8bdb-73b0d600ea49/ovsdbserver-nb/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.827337 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f36454cf-1208-4320-8a1d-8df0afad3983/openstack-network-exporter/0.log" Nov 25 13:24:50 crc kubenswrapper[4675]: I1125 13:24:50.880802 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f36454cf-1208-4320-8a1d-8df0afad3983/ovsdbserver-sb/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.120571 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf4848886-8rwx5_1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34/placement-api/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.141908 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf4848886-8rwx5_1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34/placement-log/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.411986 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/setup-container/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.578965 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/setup-container/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.667749 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/setup-container/0.log" Nov 25 13:24:51 crc kubenswrapper[4675]: I1125 13:24:51.776754 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/rabbitmq/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.021549 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5_13de1b17-7309-4325-9264-52182799c3be/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.055199 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/setup-container/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 
13:24:52.141400 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/rabbitmq/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.317165 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-gcz54_876a0f8c-9396-49fe-b1b8-5c44e691a7c9/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.426798 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z_25753af6-5930-488b-8ffc-8b905d803063/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.624095 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-87t9t_58949bc5-8d5f-4d04-bf70-eb2e0a55cda8/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.786596 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mkh7c_0808241a-edce-45b6-ae18-7b0356549cf6/ssh-known-hosts-edpm-deployment/0.log" Nov 25 13:24:52 crc kubenswrapper[4675]: I1125 13:24:52.975249 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-675f685b59-np48s_08133520-c4c6-4b59-b426-d18290e4195a/proxy-httpd/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.109844 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-675f685b59-np48s_08133520-c4c6-4b59-b426-d18290e4195a/proxy-server/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.324025 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-g7rkg_959f7b20-344e-4759-8142-19a41f250c72/swift-ring-rebalance/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.425181 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-auditor/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.475285 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-reaper/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.629440 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-replicator/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.642401 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-server/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.760438 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-auditor/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.864179 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-replicator/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.897316 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-updater/0.log" Nov 25 13:24:53 crc kubenswrapper[4675]: I1125 13:24:53.931578 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-server/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.057767 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-auditor/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.153701 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-expirer/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.223650 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-server/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.227097 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-replicator/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.412253 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-updater/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.425381 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/rsync/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.502092 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/swift-recon-cron/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.784139 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_2beee9f5-8487-4f64-a55c-11f32c68c5fc/tempest-tests-tempest-tests-runner/0.log" Nov 25 13:24:54 crc kubenswrapper[4675]: I1125 13:24:54.790602 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj_3ca0e52a-979d-4834-a03d-135355de72db/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:24:55 crc kubenswrapper[4675]: I1125 13:24:55.107235 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_ac759df6-989d-47da-9259-b6d00e9e566e/test-operator-logs-container/0.log" Nov 25 13:24:55 crc kubenswrapper[4675]: I1125 13:24:55.165216 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9_92805c79-2eb0-4562-9aed-1a7c7b88a5aa/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:25:02 crc kubenswrapper[4675]: I1125 13:25:02.878706 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6efda04f-52f8-48b1-9afd-f606c3a72d50/memcached/0.log" Nov 25 13:25:13 crc kubenswrapper[4675]: I1125 13:25:13.662509 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:25:13 crc kubenswrapper[4675]: I1125 13:25:13.663081 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.420351 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.602700 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.605938 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.654273 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.858539 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.898184 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:25:21 crc kubenswrapper[4675]: I1125 13:25:21.940781 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/extract/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.057018 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5689899996-24rxr_ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3/kube-rbac-proxy/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.154490 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-4hkh4_966aefc3-6c87-4e64-b9ae-0c175f4d18a3/kube-rbac-proxy/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.244359 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5689899996-24rxr_ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3/manager/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.346319 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-4hkh4_966aefc3-6c87-4e64-b9ae-0c175f4d18a3/manager/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.441237 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-4cprh_8d89af10-26a8-4d8b-aedf-8e450df0f28a/manager/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.472931 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-4cprh_8d89af10-26a8-4d8b-aedf-8e450df0f28a/kube-rbac-proxy/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.589201 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6bd966bbd4-hzjqx_986b1a58-05d0-4beb-9199-a7564c809455/kube-rbac-proxy/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.713786 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6bd966bbd4-hzjqx_986b1a58-05d0-4beb-9199-a7564c809455/manager/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.845472 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-nkq7r_223d4b40-6f09-41f5-816d-7e82b45b4b90/kube-rbac-proxy/0.log" Nov 25 13:25:22 crc kubenswrapper[4675]: I1125 13:25:22.908234 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-nkq7r_223d4b40-6f09-41f5-816d-7e82b45b4b90/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.028134 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-n6gqt_51b6ef4f-14c9-4c56-b374-3183ccd5cacb/kube-rbac-proxy/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.057773 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-n6gqt_51b6ef4f-14c9-4c56-b374-3183ccd5cacb/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.225512 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-577c5f6d94-svnp9_18941428-e287-4374-93e0-3209cdbbf7d7/kube-rbac-proxy/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.417189 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-577c5f6d94-svnp9_18941428-e287-4374-93e0-3209cdbbf7d7/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.475368 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-6pvms_a271eb36-50fc-40c6-8885-f97f281c1150/kube-rbac-proxy/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.503098 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-6pvms_a271eb36-50fc-40c6-8885-f97f281c1150/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.640229 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7d6f5d799-7p97w_fbd303b9-17db-401e-acbf-1ef8219e36df/kube-rbac-proxy/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.751964 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7d6f5d799-7p97w_fbd303b9-17db-401e-acbf-1ef8219e36df/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.878986 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-jdxms_e6ff98cd-4075-49dd-b40b-d1923298513e/kube-rbac-proxy/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.888945 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-jdxms_e6ff98cd-4075-49dd-b40b-d1923298513e/manager/0.log" Nov 25 13:25:23 crc kubenswrapper[4675]: I1125 13:25:23.952667 4675 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-tq6jf_33456bb6-8430-432c-ac26-1c43307141e3/kube-rbac-proxy/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.183801 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-tq6jf_33456bb6-8430-432c-ac26-1c43307141e3/manager/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.187431 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6b6c55ffd5-84vzh_a5a68379-3de8-4970-8ca1-ccf52f2d7ad8/kube-rbac-proxy/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.257148 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6b6c55ffd5-84vzh_a5a68379-3de8-4970-8ca1-ccf52f2d7ad8/manager/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.397800 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dls9t_d4608140-77a4-4067-b58e-a95ae2249fea/kube-rbac-proxy/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.598499 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dls9t_d4608140-77a4-4067-b58e-a95ae2249fea/manager/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.642196 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7979c68bc7-m6zl4_8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1/manager/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.682777 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7979c68bc7-m6zl4_8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1/kube-rbac-proxy/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.788235 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-xxcn9_bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48/kube-rbac-proxy/0.log" Nov 25 13:25:24 crc kubenswrapper[4675]: I1125 13:25:24.874148 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-xxcn9_bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48/manager/0.log" Nov 25 13:25:25 crc kubenswrapper[4675]: I1125 13:25:25.052989 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-75cf7cf5cb-gbbjk_21978291-afd8-477d-9e86-80a465441902/kube-rbac-proxy/0.log" Nov 25 13:25:25 crc kubenswrapper[4675]: I1125 13:25:25.211089 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b99896c6-rz556_01418b22-5bf7-4486-bc9c-fe8d6d757b3d/kube-rbac-proxy/0.log" Nov 25 13:25:25 crc kubenswrapper[4675]: I1125 13:25:25.483765 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-9qsg7_c3e36545-46f5-4907-84b0-93ed29882b8c/registry-server/0.log" Nov 25 13:25:25 crc kubenswrapper[4675]: I1125 13:25:25.608687 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b99896c6-rz556_01418b22-5bf7-4486-bc9c-fe8d6d757b3d/operator/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 
13:25:26.059060 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-4pmkv_9495eb50-984d-4069-bd95-719e714b1178/kube-rbac-proxy/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.091177 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-75cf7cf5cb-gbbjk_21978291-afd8-477d-9e86-80a465441902/manager/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.238362 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-4pmkv_9495eb50-984d-4069-bd95-719e714b1178/manager/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.289316 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-2z8vf_e8f46595-6a0c-4b55-9839-3360395606f7/kube-rbac-proxy/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.327595 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-2z8vf_e8f46595-6a0c-4b55-9839-3360395606f7/manager/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.438806 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_64b432ef-6de9-4d8d-84ce-78f2097bf31e/operator/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.521483 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-cc9f5bc5c-lr9bx_6fa6f393-fc29-4035-81da-a9965421c77f/kube-rbac-proxy/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.583221 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-cc9f5bc5c-lr9bx_6fa6f393-fc29-4035-81da-a9965421c77f/manager/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.778249 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58487d9bf4-9rf4d_88da95fd-fdf9-402d-90d8-e742f92cffbb/manager/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.806827 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58487d9bf4-9rf4d_88da95fd-fdf9-402d-90d8-e742f92cffbb/kube-rbac-proxy/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.881945 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-rkgfz_2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12/kube-rbac-proxy/0.log" Nov 25 13:25:26 crc kubenswrapper[4675]: I1125 13:25:26.966736 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-rkgfz_2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12/manager/0.log" Nov 25 13:25:27 crc kubenswrapper[4675]: I1125 13:25:27.049889 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-r6m74_a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb/kube-rbac-proxy/0.log" Nov 25 13:25:27 crc kubenswrapper[4675]: I1125 13:25:27.098232 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-r6m74_a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb/manager/0.log" Nov 25 13:25:43 crc 
kubenswrapper[4675]: I1125 13:25:43.282438 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-9ss4k_292203b1-555c-4331-90c7-f3a56ee042ba/control-plane-machine-set-operator/0.log" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.372398 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wh2qp_962d971d-f0de-4d22-a854-e4a65644b9b8/kube-rbac-proxy/0.log" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.432486 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wh2qp_962d971d-f0de-4d22-a854-e4a65644b9b8/machine-api-operator/0.log" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.662426 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.662477 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.662516 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.663290 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.663360 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380" gracePeriod=600 Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.886590 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380" exitCode=0 Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.886638 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380"} Nov 25 13:25:43 crc kubenswrapper[4675]: I1125 13:25:43.886673 4675 scope.go:117] "RemoveContainer" containerID="d72b838bc772f9a6f79f1d1e9265ff38de612e3f7ab926b80a452b622f41d4ba" Nov 25 13:25:44 crc kubenswrapper[4675]: I1125 13:25:44.918175 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" 
event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d"} Nov 25 13:25:55 crc kubenswrapper[4675]: I1125 13:25:55.669289 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-6lknr_ff650df4-ed32-43ee-99cf-25ea4d4b55d8/cert-manager-controller/0.log" Nov 25 13:25:55 crc kubenswrapper[4675]: I1125 13:25:55.754386 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mvcg9_9624922e-a281-4931-97a5-47ae5c1e78f4/cert-manager-cainjector/0.log" Nov 25 13:25:55 crc kubenswrapper[4675]: I1125 13:25:55.838065 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-j2mvp_23b012ff-b039-47d5-84d0-276ef8ab953b/cert-manager-webhook/0.log" Nov 25 13:26:07 crc kubenswrapper[4675]: I1125 13:26:07.956047 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-2l8n5_c147bbe0-eb8e-44ff-b4e2-271af218f1ff/nmstate-console-plugin/0.log" Nov 25 13:26:08 crc kubenswrapper[4675]: I1125 13:26:08.170088 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-xzw2f_83ed5cc3-8ce1-4765-b975-c7a543434c95/nmstate-handler/0.log" Nov 25 13:26:08 crc kubenswrapper[4675]: I1125 13:26:08.201360 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-gwgnx_48952822-4273-4558-b07e-ad6e9e80dbdf/kube-rbac-proxy/0.log" Nov 25 13:26:08 crc kubenswrapper[4675]: I1125 13:26:08.318740 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-gwgnx_48952822-4273-4558-b07e-ad6e9e80dbdf/nmstate-metrics/0.log" Nov 25 13:26:08 crc kubenswrapper[4675]: I1125 13:26:08.385494 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-tld8t_669a3ab6-17cd-4b0e-8498-bbf3bd4041f4/nmstate-operator/0.log" Nov 25 13:26:08 crc kubenswrapper[4675]: I1125 13:26:08.570030 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-z8hd2_fff162cc-a7a0-4cbb-930f-7867fbb1cf70/nmstate-webhook/0.log" Nov 25 13:26:24 crc kubenswrapper[4675]: I1125 13:26:24.846560 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dnrkj_c7735c55-9453-4052-8156-30e1155c73eb/kube-rbac-proxy/0.log" Nov 25 13:26:24 crc kubenswrapper[4675]: I1125 13:26:24.973177 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dnrkj_c7735c55-9453-4052-8156-30e1155c73eb/controller/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.153678 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.314591 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.372222 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.433596 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.453384 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.651091 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.659939 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.702070 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.746579 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.862401 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.944295 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.949203 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:26:25 crc kubenswrapper[4675]: I1125 13:26:25.976953 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/controller/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.130731 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/frr-metrics/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.165391 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/kube-rbac-proxy-frr/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.287469 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/kube-rbac-proxy/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.431390 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/reloader/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.629099 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-2vqjx_f2695e9e-a774-4ab3-823a-6ea088db6ae8/frr-k8s-webhook-server/0.log" Nov 25 13:26:26 crc kubenswrapper[4675]: I1125 13:26:26.890622 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5db469f446-85gxv_eb100a90-931c-4daa-8466-49a1ae50185b/manager/0.log" Nov 25 13:26:27 crc kubenswrapper[4675]: I1125 13:26:27.147690 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5745948454-t8f8s_fa47f959-6c90-4cbf-b9a2-1d1e152414da/webhook-server/0.log" Nov 25 13:26:27 crc kubenswrapper[4675]: I1125 13:26:27.287559 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/frr/0.log" Nov 25 13:26:27 crc kubenswrapper[4675]: I1125 13:26:27.373387 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jwm85_c2d490ab-73e3-4ff8-a9e1-1359fa135b87/kube-rbac-proxy/0.log" Nov 25 13:26:27 crc kubenswrapper[4675]: I1125 13:26:27.743416 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jwm85_c2d490ab-73e3-4ff8-a9e1-1359fa135b87/speaker/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.198129 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.482857 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.494307 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.542981 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.729619 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.737166 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.764783 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/extract/0.log" Nov 25 13:26:41 crc kubenswrapper[4675]: I1125 13:26:41.992360 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log" Nov 25 13:26:42 crc kubenswrapper[4675]: I1125 13:26:42.229683 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log" Nov 25 13:26:42 crc kubenswrapper[4675]: I1125 13:26:42.246872 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log" Nov 25 13:26:42 crc kubenswrapper[4675]: I1125 13:26:42.254400 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log" 
Nov 25 13:26:42 crc kubenswrapper[4675]: I1125 13:26:42.444433 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log"
Nov 25 13:26:42 crc kubenswrapper[4675]: I1125 13:26:42.817074 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.014247 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.202317 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/registry-server/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.260872 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.290546 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.349312 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.554793 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.559916 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log"
Nov 25 13:26:43 crc kubenswrapper[4675]: I1125 13:26:43.905742 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.123952 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.148498 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/registry-server/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.175225 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.280038 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.412288 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.468697 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.476170 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/extract/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.700998 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gft6s_b0a290f6-aa83-4c86-80ba-5f48e9a78c36/marketplace-operator/0.log"
Nov 25 13:26:44 crc kubenswrapper[4675]: I1125 13:26:44.756665 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log"
Nov 25 13:26:45 crc kubenswrapper[4675]: I1125 13:26:45.017254 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log"
Nov 25 13:26:45 crc kubenswrapper[4675]: I1125 13:26:45.204741 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log"
Nov 25 13:26:45 crc kubenswrapper[4675]: I1125 13:26:45.204884 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.240062 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.306567 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.396074 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/registry-server/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.397852 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.515871 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.577147 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.580338 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.855397 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log"
Nov 25 13:26:46 crc kubenswrapper[4675]: I1125 13:26:46.880801 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log"
Nov 25 13:26:47 crc kubenswrapper[4675]: I1125 13:26:47.049926 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/registry-server/0.log"
Nov 25 13:27:12 crc kubenswrapper[4675]: E1125 13:27:12.165016 4675 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.129.56.9:48922->38.129.56.9:40955: write tcp 38.129.56.9:48922->38.129.56.9:40955: write: broken pipe
Nov 25 13:27:22 crc kubenswrapper[4675]: E1125 13:27:22.328325 4675 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.129.56.9:55470->38.129.56.9:40955: write tcp 38.129.56.9:55470->38.129.56.9:40955: write: connection reset by peer
Nov 25 13:27:43 crc kubenswrapper[4675]: I1125 13:27:43.662230 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:27:43 crc kubenswrapper[4675]: I1125 13:27:43.663785 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:28:13 crc kubenswrapper[4675]: I1125 13:28:13.662083 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:28:13 crc kubenswrapper[4675]: I1125 13:28:13.663213 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.258654 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 13:28:19 crc kubenswrapper[4675]: E1125 13:28:19.261793 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="extract-content"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.261846 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="extract-content"
Nov 25 13:28:19 crc kubenswrapper[4675]: E1125 13:28:19.261882 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="extract-utilities"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.261891 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="extract-utilities"
Nov 25 13:28:19 crc kubenswrapper[4675]: E1125 13:28:19.261915 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18415355-82ef-43e5-b0ac-feb493e7d00e" containerName="container-00"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.261923 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="18415355-82ef-43e5-b0ac-feb493e7d00e" containerName="container-00"
Nov 25 13:28:19 crc kubenswrapper[4675]: E1125 13:28:19.261940 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="registry-server"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.261947 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="registry-server"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.262231 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="fccf422b-ba28-4570-81ea-a8f4943fd51e" containerName="registry-server"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.262267 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="18415355-82ef-43e5-b0ac-feb493e7d00e" containerName="container-00"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.264196 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.271417 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.272049 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.295148 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.424392 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.424500 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.526045 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.526197 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.526305 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.571225 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:19 crc kubenswrapper[4675]: I1125 13:28:19.597095 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:20 crc kubenswrapper[4675]: I1125 13:28:20.173366 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 13:28:20 crc kubenswrapper[4675]: I1125 13:28:20.210251 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03","Type":"ContainerStarted","Data":"113030d84d96905f70ec7d6c98905066d8fe5b46cfcbd75f39a3dc3bc3e7e515"}
Nov 25 13:28:21 crc kubenswrapper[4675]: I1125 13:28:21.220018 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03","Type":"ContainerStarted","Data":"870a67625d36d56679a6f9ad2dc2465f7486c10e27fe9b0d3ca22334a3f4aab6"}
Nov 25 13:28:22 crc kubenswrapper[4675]: I1125 13:28:22.229630 4675 generic.go:334] "Generic (PLEG): container finished" podID="89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" containerID="870a67625d36d56679a6f9ad2dc2465f7486c10e27fe9b0d3ca22334a3f4aab6" exitCode=0
Nov 25 13:28:22 crc kubenswrapper[4675]: I1125 13:28:22.229688 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03","Type":"ContainerDied","Data":"870a67625d36d56679a6f9ad2dc2465f7486c10e27fe9b0d3ca22334a3f4aab6"}
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.601493 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.724976 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access\") pod \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") "
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.725187 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir\") pod \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\" (UID: \"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03\") "
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.725296 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" (UID: "89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.726994 4675 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.732554 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" (UID: "89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 13:28:23 crc kubenswrapper[4675]: I1125 13:28:23.828890 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 13:28:24 crc kubenswrapper[4675]: I1125 13:28:24.248269 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03","Type":"ContainerDied","Data":"113030d84d96905f70ec7d6c98905066d8fe5b46cfcbd75f39a3dc3bc3e7e515"}
Nov 25 13:28:24 crc kubenswrapper[4675]: I1125 13:28:24.248310 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="113030d84d96905f70ec7d6c98905066d8fe5b46cfcbd75f39a3dc3bc3e7e515"
Nov 25 13:28:24 crc kubenswrapper[4675]: I1125 13:28:24.248332 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.257408 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 13:28:25 crc kubenswrapper[4675]: E1125 13:28:25.258372 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" containerName="pruner"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.258391 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" containerName="pruner"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.258752 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="89d11a6b-fc32-4d7d-ab52-bb4a5c19ae03" containerName="pruner"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.259980 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.262946 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.263405 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.263601 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.268466 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.269051 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.278469 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.365014 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.365333 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.365502 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.365545 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.365154 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.397131 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access\") pod \"installer-9-crc\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:25 crc kubenswrapper[4675]: I1125 13:28:25.589935 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 13:28:26 crc kubenswrapper[4675]: I1125 13:28:26.087866 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 13:28:26 crc kubenswrapper[4675]: I1125 13:28:26.274764 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"526934b8-a76a-4c95-b2ae-84d5bb58742a","Type":"ContainerStarted","Data":"a7535062ce6a7c9a8669d77edf617333dc5c6275d663bb03719f94b7126ca2a3"}
Nov 25 13:28:27 crc kubenswrapper[4675]: I1125 13:28:27.283659 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"526934b8-a76a-4c95-b2ae-84d5bb58742a","Type":"ContainerStarted","Data":"02cc51bd59821faa5599095dbb353d1dafb7ae73d852b47e4a9a6519b0c725b4"}
Nov 25 13:28:27 crc kubenswrapper[4675]: I1125 13:28:27.307411 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.307393696 podStartE2EDuration="2.307393696s" podCreationTimestamp="2025-11-25 13:28:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:28:27.300170377 +0000 UTC m=+3652.471762728" watchObservedRunningTime="2025-11-25 13:28:27.307393696 +0000 UTC m=+3652.478986037"
Nov 25 13:28:43 crc kubenswrapper[4675]: I1125 13:28:43.661885 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 13:28:43 crc kubenswrapper[4675]: I1125 13:28:43.662470 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 13:28:43 crc kubenswrapper[4675]: I1125 13:28:43.662520 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r"
Nov 25 13:28:43 crc kubenswrapper[4675]: I1125 13:28:43.663308 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 13:28:43 crc kubenswrapper[4675]: I1125 13:28:43.663988 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" gracePeriod=600
Nov 25 13:28:43 crc kubenswrapper[4675]: E1125 13:28:43.798986 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:28:44 crc kubenswrapper[4675]: I1125 13:28:44.441398 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" exitCode=0
Nov 25 13:28:44 crc kubenswrapper[4675]: I1125 13:28:44.441450 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d"}
Nov 25 13:28:44 crc kubenswrapper[4675]: I1125 13:28:44.441489 4675 scope.go:117] "RemoveContainer" containerID="2803df1792774157acda9ae9775048cc478a841249be56ff9bc1791032dca380"
Nov 25 13:28:44 crc kubenswrapper[4675]: I1125 13:28:44.442088 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d"
Nov 25 13:28:44 crc kubenswrapper[4675]: E1125 13:28:44.442380 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:28:45 crc kubenswrapper[4675]: I1125 13:28:45.456507 4675 generic.go:334] "Generic (PLEG): container finished" podID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerID="13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97" exitCode=0
Nov 25 13:28:45 crc kubenswrapper[4675]: I1125 13:28:45.456999 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6smlh/must-gather-kbvj9"
event={"ID":"3697c881-eee5-4d41-856b-8a8064d1cf28","Type":"ContainerDied","Data":"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97"} Nov 25 13:28:45 crc kubenswrapper[4675]: I1125 13:28:45.458422 4675 scope.go:117] "RemoveContainer" containerID="13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97" Nov 25 13:28:45 crc kubenswrapper[4675]: I1125 13:28:45.610656 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6smlh_must-gather-kbvj9_3697c881-eee5-4d41-856b-8a8064d1cf28/gather/0.log" Nov 25 13:28:53 crc kubenswrapper[4675]: I1125 13:28:53.606440 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6smlh/must-gather-kbvj9"] Nov 25 13:28:53 crc kubenswrapper[4675]: I1125 13:28:53.607590 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6smlh/must-gather-kbvj9" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="copy" containerID="cri-o://86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b" gracePeriod=2 Nov 25 13:28:53 crc kubenswrapper[4675]: I1125 13:28:53.613608 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6smlh/must-gather-kbvj9"] Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.044205 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6smlh_must-gather-kbvj9_3697c881-eee5-4d41-856b-8a8064d1cf28/copy/0.log" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.044989 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/must-gather-kbvj9" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.223219 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kw2pb\" (UniqueName: \"kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb\") pod \"3697c881-eee5-4d41-856b-8a8064d1cf28\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.223407 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output\") pod \"3697c881-eee5-4d41-856b-8a8064d1cf28\" (UID: \"3697c881-eee5-4d41-856b-8a8064d1cf28\") " Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.238584 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb" (OuterVolumeSpecName: "kube-api-access-kw2pb") pod "3697c881-eee5-4d41-856b-8a8064d1cf28" (UID: "3697c881-eee5-4d41-856b-8a8064d1cf28"). InnerVolumeSpecName "kube-api-access-kw2pb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.326465 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kw2pb\" (UniqueName: \"kubernetes.io/projected/3697c881-eee5-4d41-856b-8a8064d1cf28-kube-api-access-kw2pb\") on node \"crc\" DevicePath \"\"" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.381471 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3697c881-eee5-4d41-856b-8a8064d1cf28" (UID: "3697c881-eee5-4d41-856b-8a8064d1cf28"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.433222 4675 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3697c881-eee5-4d41-856b-8a8064d1cf28-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.564679 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6smlh_must-gather-kbvj9_3697c881-eee5-4d41-856b-8a8064d1cf28/copy/0.log" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.565099 4675 generic.go:334] "Generic (PLEG): container finished" podID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerID="86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b" exitCode=143 Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.565158 4675 scope.go:117] "RemoveContainer" containerID="86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.565331 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6smlh/must-gather-kbvj9" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.593282 4675 scope.go:117] "RemoveContainer" containerID="13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.651030 4675 scope.go:117] "RemoveContainer" containerID="86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b" Nov 25 13:28:54 crc kubenswrapper[4675]: E1125 13:28:54.651551 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b\": container with ID starting with 86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b not found: ID does not exist" containerID="86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.651598 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b"} err="failed to get container status \"86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b\": rpc error: code = NotFound desc = could not find container \"86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b\": container with ID starting with 86117b0a4da9f2c0cdc7abe776bdfd37f0ba5d0aea59dfc51b6ff8b6eee2f61b not found: ID does not exist" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.651627 4675 scope.go:117] "RemoveContainer" containerID="13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97" Nov 25 13:28:54 crc kubenswrapper[4675]: E1125 13:28:54.651953 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97\": container with ID starting with 13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97 not found: ID does not exist" containerID="13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97" Nov 25 13:28:54 crc kubenswrapper[4675]: I1125 13:28:54.651980 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97"} err="failed to get container status 
\"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97\": rpc error: code = NotFound desc = could not find container \"13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97\": container with ID starting with 13a827ac30fef2a82e0613e6709307a339977ac3bd872899450cc8c1db1d4f97 not found: ID does not exist" Nov 25 13:28:55 crc kubenswrapper[4675]: I1125 13:28:55.538718 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:28:55 crc kubenswrapper[4675]: E1125 13:28:55.539316 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:28:55 crc kubenswrapper[4675]: I1125 13:28:55.542476 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" path="/var/lib/kubelet/pods/3697c881-eee5-4d41-856b-8a8064d1cf28/volumes" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.151808 4675 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.152821 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a" gracePeriod=15 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.152912 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8" gracePeriod=15 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.153157 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1" gracePeriod=15 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.153209 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c" gracePeriod=15 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.153242 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3" gracePeriod=15 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.153590 4675 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154284 4675 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154310 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154322 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154330 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154355 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="copy" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154364 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="copy" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154375 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154384 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154401 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154408 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154427 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154436 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154455 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154463 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154477 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="gather" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154486 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="gather" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.154510 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154518 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" 
Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154751 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154769 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154785 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154796 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="gather" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154803 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154817 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154843 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="3697c881-eee5-4d41-856b-8a8064d1cf28" containerName="copy" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.154860 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.157306 4675 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.158236 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.163543 4675 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.221870 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315494 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315561 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315622 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315646 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315695 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315734 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.315752 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.316319 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418163 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418240 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418277 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418327 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418326 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418372 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418351 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418391 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418350 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418373 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418442 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418503 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418543 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418547 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418572 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.418590 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.519273 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:29:04 crc kubenswrapper[4675]: E1125 13:29:04.567164 4675 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.9:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b42ff96ee38ee openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 13:29:04.56196939 +0000 UTC m=+3689.733561751,LastTimestamp:2025-11-25 13:29:04.56196939 +0000 UTC m=+3689.733561751,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.687091 4675 generic.go:334] "Generic (PLEG): container finished" podID="526934b8-a76a-4c95-b2ae-84d5bb58742a" containerID="02cc51bd59821faa5599095dbb353d1dafb7ae73d852b47e4a9a6519b0c725b4" exitCode=0 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.687196 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"526934b8-a76a-4c95-b2ae-84d5bb58742a","Type":"ContainerDied","Data":"02cc51bd59821faa5599095dbb353d1dafb7ae73d852b47e4a9a6519b0c725b4"} Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.688868 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.689229 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.691749 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.693783 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.695133 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8" exitCode=0 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.695164 4675 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1" exitCode=0 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.695179 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c" exitCode=0 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.695188 4675 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3" exitCode=2 Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.695264 4675 scope.go:117] "RemoveContainer" containerID="e52f393de455f9d011353d7d634bd14f1377aae1a5d676dde04542dbf8254a1c" Nov 25 13:29:04 crc kubenswrapper[4675]: I1125 13:29:04.699508 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"84ab0cbf31fa10e46f629105a85ee8171397d60a1615572987fccf5168d7af52"} Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.540374 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.541210 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:05 crc kubenswrapper[4675]: E1125 13:29:05.543513 4675 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/glance-glance-default-external-api-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/glance-glance-default-external-api-0\": dial tcp 38.129.56.9:6443: connect: connection refused" pod="openstack/glance-default-external-api-0" volumeName="glance" Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.711643 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.716048 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"7ed8f81e90626eebe0845ed795224d18fe91d4977556f9ac4d2f7fed4650f86e"} Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.716174 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:05 crc kubenswrapper[4675]: I1125 13:29:05.716641 4675 status_manager.go:851] "Failed to get status for pod" 
podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.048844 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.049803 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.050176 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.148594 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir\") pod \"526934b8-a76a-4c95-b2ae-84d5bb58742a\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.148938 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access\") pod \"526934b8-a76a-4c95-b2ae-84d5bb58742a\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.149110 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock\") pod \"526934b8-a76a-4c95-b2ae-84d5bb58742a\" (UID: \"526934b8-a76a-4c95-b2ae-84d5bb58742a\") " Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.148701 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "526934b8-a76a-4c95-b2ae-84d5bb58742a" (UID: "526934b8-a76a-4c95-b2ae-84d5bb58742a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.149160 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock" (OuterVolumeSpecName: "var-lock") pod "526934b8-a76a-4c95-b2ae-84d5bb58742a" (UID: "526934b8-a76a-4c95-b2ae-84d5bb58742a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.149902 4675 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.150009 4675 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/526934b8-a76a-4c95-b2ae-84d5bb58742a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.155087 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "526934b8-a76a-4c95-b2ae-84d5bb58742a" (UID: "526934b8-a76a-4c95-b2ae-84d5bb58742a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.252600 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/526934b8-a76a-4c95-b2ae-84d5bb58742a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.731784 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.732956 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"526934b8-a76a-4c95-b2ae-84d5bb58742a","Type":"ContainerDied","Data":"a7535062ce6a7c9a8669d77edf617333dc5c6275d663bb03719f94b7126ca2a3"} Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.733009 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7535062ce6a7c9a8669d77edf617333dc5c6275d663bb03719f94b7126ca2a3" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.751711 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:06 crc kubenswrapper[4675]: I1125 13:29:06.752269 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.083396 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.084201 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.084760 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.084982 4675 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.085341 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092031 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092092 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092138 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092197 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092237 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092320 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092675 4675 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092690 4675 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.092698 4675 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.112406 4675 scope.go:117] "RemoveContainer" containerID="073a74eb1d39771d4a98badaf1bc7aa09af5c329676af180c2b51faeda034b8c" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.132609 4675 scope.go:117] "RemoveContainer" containerID="23b06d02cda8d547c1c5fe96fef43eae96c09d0d6ee3544b282fb2014d92480a" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.154121 4675 scope.go:117] "RemoveContainer" containerID="20f9a7cd1e4e7b79bd4c823114ca450dc5eed4e4f0d8c7001440b94c0bb0d6d8" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.172997 4675 scope.go:117] "RemoveContainer" containerID="b3f41291621bf20ff3bcbab1bec1f3954f2b2f18b83871e4503ae458aa37b4c1" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.191094 4675 scope.go:117] "RemoveContainer" containerID="e95f7054cfafc55bf9decd1afa45dd5805f098416139b1047e2d2fe084cbb1c3" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.208272 4675 scope.go:117] "RemoveContainer" containerID="65bcc8042df93ee72b57fc919612f2fa80734368105cad2c868e71c8663ba6d3" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.545198 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.744518 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.745583 4675 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.746047 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.746496 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.749889 4675 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.750258 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:07 crc kubenswrapper[4675]: I1125 13:29:07.750522 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: I1125 13:29:10.531992 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.532465 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:29:10 crc kubenswrapper[4675]: I1125 13:29:10.579396 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.794087 4675 kubelet_node_status.go:585] "Error updating node status, will retry" 
err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T13:29:10Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T13:29:10Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T13:29:10Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T13:29:10Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.796180 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.796670 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.796972 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.797242 4675 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:10 crc kubenswrapper[4675]: E1125 13:29:10.797266 4675 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 13:29:11 crc kubenswrapper[4675]: E1125 13:29:11.600490 4675 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.9:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b42ff96ee38ee openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 13:29:04.56196939 +0000 UTC m=+3689.733561751,LastTimestamp:2025-11-25 13:29:04.56196939 +0000 UTC 
m=+3689.733561751,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.412778 4675 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.413350 4675 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.413564 4675 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.413748 4675 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.413977 4675 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:14 crc kubenswrapper[4675]: I1125 13:29:14.414009 4675 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.415565 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="200ms" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.616770 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="400ms" Nov 25 13:29:14 crc kubenswrapper[4675]: E1125 13:29:14.623409 4675 token_manager.go:121] "Couldn't update token" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa/token\": dial tcp 38.129.56.9:6443: connect: connection refused" cacheKey="\"openshift-controller-manager-sa\"/\"openshift-controller-manager\"/[]string(nil)/3607/v1.BoundObjectReference{Kind:\"Pod\", APIVersion:\"v1\", Name:\"controller-manager-77b8844fb6-757qm\", UID:\"e09bd3ea-6adc-4088-a8e2-e768f12c15ef\"}" Nov 25 13:29:15 crc kubenswrapper[4675]: E1125 13:29:15.017954 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="800ms" Nov 25 13:29:15 crc kubenswrapper[4675]: I1125 13:29:15.539552 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:15 crc kubenswrapper[4675]: I1125 13:29:15.540200 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:15 crc kubenswrapper[4675]: E1125 13:29:15.820436 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="1.6s" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.531462 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.532906 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.533498 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.552660 4675 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.552702 4675 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:16 crc kubenswrapper[4675]: E1125 13:29:16.553016 4675 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.553674 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.837175 4675 generic.go:334] "Generic (PLEG): container finished" podID="eb100a90-931c-4daa-8466-49a1ae50185b" containerID="de89a8ed7880b57083e71241b537176b458e508d2823635acc38832d974e99fb" exitCode=1 Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.837247 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerDied","Data":"de89a8ed7880b57083e71241b537176b458e508d2823635acc38832d974e99fb"} Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.838046 4675 scope.go:117] "RemoveContainer" containerID="de89a8ed7880b57083e71241b537176b458e508d2823635acc38832d974e99fb" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.838107 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.838328 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.838548 4675 status_manager.go:851] "Failed to get status for pod" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5db469f446-85gxv\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:16 crc kubenswrapper[4675]: I1125 13:29:16.840742 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"306879005e09473b753740f26c6acfecede4a6b7d453f3a34f6c1bb2820fb8c4"} Nov 25 13:29:17 crc kubenswrapper[4675]: E1125 13:29:17.421291 4675 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.9:6443: connect: connection refused" interval="3.2s" Nov 25 13:29:17 crc kubenswrapper[4675]: E1125 13:29:17.574462 4675 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/mysql-db-openstack-galera-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/mysql-db-openstack-galera-0\": dial tcp 38.129.56.9:6443: connect: connection refused" pod="openstack/openstack-galera-0" volumeName="mysql-db" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.849902 4675 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="e0ef6b0fc83480b4436fb5458cbc9ca08847d184d2a862c08219fbbdbc328101" exitCode=0 Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.849993 4675 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"e0ef6b0fc83480b4436fb5458cbc9ca08847d184d2a862c08219fbbdbc328101"} Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.850177 4675 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.850216 4675 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:17 crc kubenswrapper[4675]: E1125 13:29:17.850880 4675 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.851234 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.851597 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.851866 4675 status_manager.go:851] "Failed to get status for pod" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5db469f446-85gxv\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.853439 4675 generic.go:334] "Generic (PLEG): container finished" podID="eb100a90-931c-4daa-8466-49a1ae50185b" containerID="647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997" exitCode=1 Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.853521 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerDied","Data":"647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997"} Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.853609 4675 scope.go:117] "RemoveContainer" containerID="de89a8ed7880b57083e71241b537176b458e508d2823635acc38832d974e99fb" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.854397 4675 scope.go:117] "RemoveContainer" containerID="647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.854413 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: 
connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.854642 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: E1125 13:29:17.854702 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-5db469f446-85gxv_metallb-system(eb100a90-931c-4daa-8466-49a1ae50185b)\"" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.855018 4675 status_manager.go:851] "Failed to get status for pod" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5db469f446-85gxv\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.856647 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.856695 4675 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90" exitCode=1 Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.856726 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90"} Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.857320 4675 scope.go:117] "RemoveContainer" containerID="1302a87d46e7ca8bdd16737fb3795dddd92b39116d95df547630a3d0c78f9e90" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.858341 4675 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.858692 4675 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.859658 4675 status_manager.go:851] "Failed to get status for pod" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 
13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.861695 4675 status_manager.go:851] "Failed to get status for pod" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5db469f446-85gxv\": dial tcp 38.129.56.9:6443: connect: connection refused" Nov 25 13:29:17 crc kubenswrapper[4675]: I1125 13:29:17.948977 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 13:29:18 crc kubenswrapper[4675]: E1125 13:29:18.602252 4675 token_manager.go:121] "Couldn't update token" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/serviceaccounts/controller/token\": dial tcp 38.129.56.9:6443: connect: connection refused" cacheKey="\"controller\"/\"metallb-system\"/[]string(nil)/3607/v1.BoundObjectReference{Kind:\"Pod\", APIVersion:\"v1\", Name:\"controller-6c7b4b5f48-dnrkj\", UID:\"c7735c55-9453-4052-8156-30e1155c73eb\"}" Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.804173 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.868876 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.869154 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"19a918a27874ffcf43a74b2b4f1d34075d9aba560722cbebfd69072cc172f8c3"} Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.873800 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"00532541570ee5d9ac284c1b3269179187151d7b2a0c5ab85b0d3e2064c0630f"} Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.873862 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7cc84aaafcb25454ebf0ce63f025babdeb4e5cfb0f71332de023e1064b0151b5"} Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.873878 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ebb4a3375eb9cad552a75c5131b3f012531b423a20a8a36a3b490d7b6befb1e6"} Nov 25 13:29:18 crc kubenswrapper[4675]: I1125 13:29:18.873888 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"471d930520d4631a6155b5959c49698a1dc4f51c7f3e7a63424214a06cfa2291"} Nov 25 13:29:19 crc kubenswrapper[4675]: I1125 13:29:19.886581 4675 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:19 crc kubenswrapper[4675]: I1125 13:29:19.886872 4675 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:19 crc kubenswrapper[4675]: I1125 13:29:19.887218 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b8f3ab99da909c8cad4aaee690eb96fe8d9f6f23c935909ca5a712fa08ef5b0c"} Nov 25 13:29:19 crc kubenswrapper[4675]: I1125 13:29:19.887258 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:20 crc kubenswrapper[4675]: I1125 13:29:20.580166 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.554067 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.554108 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.559608 4675 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]log ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]etcd ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-filter ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-apiextensions-informers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-apiextensions-controllers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/crd-informer-synced ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-system-namespaces-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [-]poststarthook/rbac/bootstrap-roles 
failed: reason withheld Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/bootstrap-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/start-kube-aggregator-informers ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-registration-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-discovery-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]autoregister-completion ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-openapi-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 25 13:29:21 crc kubenswrapper[4675]: livez check failed Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.559664 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.717131 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.727345 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 13:29:21 crc kubenswrapper[4675]: I1125 13:29:21.901967 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 13:29:22 crc kubenswrapper[4675]: I1125 13:29:22.532859 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:29:22 crc kubenswrapper[4675]: E1125 13:29:22.533399 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:29:22 crc kubenswrapper[4675]: I1125 13:29:22.982981 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 13:29:22 crc kubenswrapper[4675]: I1125 13:29:22.983832 4675 scope.go:117] "RemoveContainer" containerID="647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997" Nov 25 13:29:22 crc kubenswrapper[4675]: E1125 13:29:22.984183 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" 
with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-5db469f446-85gxv_metallb-system(eb100a90-931c-4daa-8466-49a1ae50185b)\"" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" Nov 25 13:29:24 crc kubenswrapper[4675]: I1125 13:29:24.932810 4675 generic.go:334] "Generic (PLEG): container finished" podID="2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12" containerID="516b2b20641a13a4596239c08ebdfdb2fa77d0b3f2ebff526af35c2d30b1c886" exitCode=1 Nov 25 13:29:24 crc kubenswrapper[4675]: I1125 13:29:24.933123 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" event={"ID":"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12","Type":"ContainerDied","Data":"516b2b20641a13a4596239c08ebdfdb2fa77d0b3f2ebff526af35c2d30b1c886"} Nov 25 13:29:24 crc kubenswrapper[4675]: I1125 13:29:24.933899 4675 scope.go:117] "RemoveContainer" containerID="516b2b20641a13a4596239c08ebdfdb2fa77d0b3f2ebff526af35c2d30b1c886" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.068415 4675 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.561251 4675 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="0cf86a14-718c-4f03-b7b3-d8a2adbd3baa" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.944388 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" event={"ID":"2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12","Type":"ContainerStarted","Data":"07c315f543fa56133a821fd33ce37d1b6ef652f1e22bdc852441eca93d339b32"} Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.944605 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.947048 4675 generic.go:334] "Generic (PLEG): container finished" podID="01418b22-5bf7-4486-bc9c-fe8d6d757b3d" containerID="771ad606b5fa16113322ff050190fd8022b2d7dea1753e17e15cbc216aeab07d" exitCode=1 Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.947092 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" event={"ID":"01418b22-5bf7-4486-bc9c-fe8d6d757b3d","Type":"ContainerDied","Data":"771ad606b5fa16113322ff050190fd8022b2d7dea1753e17e15cbc216aeab07d"} Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.947412 4675 scope.go:117] "RemoveContainer" containerID="771ad606b5fa16113322ff050190fd8022b2d7dea1753e17e15cbc216aeab07d" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.950779 4675 generic.go:334] "Generic (PLEG): container finished" podID="e8f46595-6a0c-4b55-9839-3360395606f7" containerID="795f466779587cb11748d192af45c1c5270921a968dd364bc26091a80cbd27b7" exitCode=1 Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.950851 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerDied","Data":"795f466779587cb11748d192af45c1c5270921a968dd364bc26091a80cbd27b7"} Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 
13:29:25.951085 4675 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.951100 4675 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcc2869e-ed3f-48cf-9f10-a80e24888121" Nov 25 13:29:25 crc kubenswrapper[4675]: I1125 13:29:25.951396 4675 scope.go:117] "RemoveContainer" containerID="795f466779587cb11748d192af45c1c5270921a968dd364bc26091a80cbd27b7" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.056614 4675 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="0cf86a14-718c-4f03-b7b3-d8a2adbd3baa" Nov 25 13:29:26 crc kubenswrapper[4675]: E1125 13:29:26.739109 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8f46595_6a0c_4b55_9839_3360395606f7.slice/crio-conmon-ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285.scope\": RecentStats: unable to find data in memory cache]" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.963612 4675 generic.go:334] "Generic (PLEG): container finished" podID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" containerID="45e4f3a968a2c8cab5ba3984005c94977c2d9e1a56138690e5a2607101e378ea" exitCode=1 Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.963693 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerDied","Data":"45e4f3a968a2c8cab5ba3984005c94977c2d9e1a56138690e5a2607101e378ea"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.964519 4675 scope.go:117] "RemoveContainer" containerID="45e4f3a968a2c8cab5ba3984005c94977c2d9e1a56138690e5a2607101e378ea" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.969681 4675 generic.go:334] "Generic (PLEG): container finished" podID="88da95fd-fdf9-402d-90d8-e742f92cffbb" containerID="061ecdf9aa2d0f61605f4accb5e6b453e9328d3716a9b60260095559670ee9c9" exitCode=1 Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.969743 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerDied","Data":"061ecdf9aa2d0f61605f4accb5e6b453e9328d3716a9b60260095559670ee9c9"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.974849 4675 scope.go:117] "RemoveContainer" containerID="061ecdf9aa2d0f61605f4accb5e6b453e9328d3716a9b60260095559670ee9c9" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.977467 4675 generic.go:334] "Generic (PLEG): container finished" podID="e6ff98cd-4075-49dd-b40b-d1923298513e" containerID="6acc2abc8938e97f60eb5f959bb28d00e64d6631a975cd53e288956fa98825e4" exitCode=1 Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.977543 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerDied","Data":"6acc2abc8938e97f60eb5f959bb28d00e64d6631a975cd53e288956fa98825e4"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.978123 4675 scope.go:117] "RemoveContainer" 
containerID="6acc2abc8938e97f60eb5f959bb28d00e64d6631a975cd53e288956fa98825e4" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.981631 4675 generic.go:334] "Generic (PLEG): container finished" podID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" containerID="5fbfece02fe84e009a5d9924115a3d3f5a247d2e50ff39d22cfd53a44aa51915" exitCode=1 Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.981703 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerDied","Data":"5fbfece02fe84e009a5d9924115a3d3f5a247d2e50ff39d22cfd53a44aa51915"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.982215 4675 scope.go:117] "RemoveContainer" containerID="5fbfece02fe84e009a5d9924115a3d3f5a247d2e50ff39d22cfd53a44aa51915" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.985126 4675 generic.go:334] "Generic (PLEG): container finished" podID="223d4b40-6f09-41f5-816d-7e82b45b4b90" containerID="4738d8d88de0407838d6011e7e0aa3a8cb6b91f80e567b16bc0e02fb8324d0b7" exitCode=1 Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.985252 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerDied","Data":"4738d8d88de0407838d6011e7e0aa3a8cb6b91f80e567b16bc0e02fb8324d0b7"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.986170 4675 scope.go:117] "RemoveContainer" containerID="4738d8d88de0407838d6011e7e0aa3a8cb6b91f80e567b16bc0e02fb8324d0b7" Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.989872 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" event={"ID":"01418b22-5bf7-4486-bc9c-fe8d6d757b3d","Type":"ContainerStarted","Data":"0ce66d4e4b8d6538f7f64dab54dd96fecb3c231d92d09c320aa3230c11ba27cf"} Nov 25 13:29:26 crc kubenswrapper[4675]: I1125 13:29:26.990147 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.002062 4675 generic.go:334] "Generic (PLEG): container finished" podID="e8f46595-6a0c-4b55-9839-3360395606f7" containerID="ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.002163 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerDied","Data":"ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.002203 4675 scope.go:117] "RemoveContainer" containerID="795f466779587cb11748d192af45c1c5270921a968dd364bc26091a80cbd27b7" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.002955 4675 scope.go:117] "RemoveContainer" containerID="ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285" Nov 25 13:29:27 crc kubenswrapper[4675]: E1125 13:29:27.003236 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7)\"" 
pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.008878 4675 generic.go:334] "Generic (PLEG): container finished" podID="986b1a58-05d0-4beb-9199-a7564c809455" containerID="4aa80bec7c4faa4ccae0910a81ac4bd6fb51056a689f0a3703ab648a6b275892" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.008980 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerDied","Data":"4aa80bec7c4faa4ccae0910a81ac4bd6fb51056a689f0a3703ab648a6b275892"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.010563 4675 scope.go:117] "RemoveContainer" containerID="4aa80bec7c4faa4ccae0910a81ac4bd6fb51056a689f0a3703ab648a6b275892" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.029861 4675 generic.go:334] "Generic (PLEG): container finished" podID="18941428-e287-4374-93e0-3209cdbbf7d7" containerID="02dfa88b12197fdadfd2aa144f0b9efa359284fda4897a2c6d10709625ce22d3" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.029927 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerDied","Data":"02dfa88b12197fdadfd2aa144f0b9efa359284fda4897a2c6d10709625ce22d3"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.030504 4675 scope.go:117] "RemoveContainer" containerID="02dfa88b12197fdadfd2aa144f0b9efa359284fda4897a2c6d10709625ce22d3" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.035798 4675 generic.go:334] "Generic (PLEG): container finished" podID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" containerID="d6f186a95189e2a38ebddc8a2c833e6b10afdb17812bf5904309989b130e47a1" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.035883 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerDied","Data":"d6f186a95189e2a38ebddc8a2c833e6b10afdb17812bf5904309989b130e47a1"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.036463 4675 scope.go:117] "RemoveContainer" containerID="d6f186a95189e2a38ebddc8a2c833e6b10afdb17812bf5904309989b130e47a1" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.039220 4675 generic.go:334] "Generic (PLEG): container finished" podID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" containerID="5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.039291 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerDied","Data":"5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.039945 4675 scope.go:117] "RemoveContainer" containerID="5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.045979 4675 generic.go:334] "Generic (PLEG): container finished" podID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" containerID="d983778d43e5dd18a742a7903be363e738ff496a4edc1825077bfc8e4e6ec8b1" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 
13:29:27.046141 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerDied","Data":"d983778d43e5dd18a742a7903be363e738ff496a4edc1825077bfc8e4e6ec8b1"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.047020 4675 scope.go:117] "RemoveContainer" containerID="d983778d43e5dd18a742a7903be363e738ff496a4edc1825077bfc8e4e6ec8b1" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.059015 4675 generic.go:334] "Generic (PLEG): container finished" podID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" containerID="990b012da81571e2297f728312722bde123011371a34a3b8401a7d084b744334" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.059073 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerDied","Data":"990b012da81571e2297f728312722bde123011371a34a3b8401a7d084b744334"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.059702 4675 scope.go:117] "RemoveContainer" containerID="990b012da81571e2297f728312722bde123011371a34a3b8401a7d084b744334" Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.067777 4675 generic.go:334] "Generic (PLEG): container finished" podID="fbd303b9-17db-401e-acbf-1ef8219e36df" containerID="3b3c2e64f3037f4fe2358136d3b2de543be088851ee8c98e61218aeb327ab3a5" exitCode=1 Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.068455 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerDied","Data":"3b3c2e64f3037f4fe2358136d3b2de543be088851ee8c98e61218aeb327ab3a5"} Nov 25 13:29:27 crc kubenswrapper[4675]: I1125 13:29:27.068744 4675 scope.go:117] "RemoveContainer" containerID="3b3c2e64f3037f4fe2358136d3b2de543be088851ee8c98e61218aeb327ab3a5" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.079199 4675 generic.go:334] "Generic (PLEG): container finished" podID="bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48" containerID="8b0bd41d396f8608e02a965008b0e62af913967a6c272816d00de55040ec1e86" exitCode=1 Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.079355 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" event={"ID":"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48","Type":"ContainerDied","Data":"8b0bd41d396f8608e02a965008b0e62af913967a6c272816d00de55040ec1e86"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.081562 4675 scope.go:117] "RemoveContainer" containerID="8b0bd41d396f8608e02a965008b0e62af913967a6c272816d00de55040ec1e86" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.090995 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerStarted","Data":"cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.092200 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.097855 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerStarted","Data":"490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.098839 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.107419 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerStarted","Data":"d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.112019 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.114852 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.114983 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.120089 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerStarted","Data":"15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.121003 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.129199 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerStarted","Data":"971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.129758 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.136696 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerStarted","Data":"b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.137700 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.146441 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" 
event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerStarted","Data":"af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.147389 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.152948 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerStarted","Data":"06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.153164 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.154476 4675 generic.go:334] "Generic (PLEG): container finished" podID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" containerID="bdddf947ea1a6bdc9dafd93f3f61d64e22cef53a665cc93ccb1d99fea80e52bf" exitCode=1 Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.154564 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerDied","Data":"bdddf947ea1a6bdc9dafd93f3f61d64e22cef53a665cc93ccb1d99fea80e52bf"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.154954 4675 scope.go:117] "RemoveContainer" containerID="bdddf947ea1a6bdc9dafd93f3f61d64e22cef53a665cc93ccb1d99fea80e52bf" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.159062 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerStarted","Data":"06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.159293 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.162679 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerStarted","Data":"8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.163759 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.167791 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerStarted","Data":"a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4"} Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.168132 4675 status_manager.go:317] "Container readiness changed for unknown container" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" containerID="cri-o://5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e" Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.168151 4675 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 13:29:28 crc kubenswrapper[4675]: I1125 13:29:28.808387 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.178004 4675 generic.go:334] "Generic (PLEG): container finished" podID="a271eb36-50fc-40c6-8885-f97f281c1150" containerID="8f657c44b138f2f212bb851cc2ddcc13d1c7e6b111f3d6d1b365b9fa1ae17bbd" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.178084 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerDied","Data":"8f657c44b138f2f212bb851cc2ddcc13d1c7e6b111f3d6d1b365b9fa1ae17bbd"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.178722 4675 scope.go:117] "RemoveContainer" containerID="8f657c44b138f2f212bb851cc2ddcc13d1c7e6b111f3d6d1b365b9fa1ae17bbd"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.181897 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fa6f393-fc29-4035-81da-a9965421c77f" containerID="974aa3462e9cda69134179b7906911f488044f8e81e6f2b8ba4712df18b0a6ab" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.181963 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerDied","Data":"974aa3462e9cda69134179b7906911f488044f8e81e6f2b8ba4712df18b0a6ab"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.182619 4675 scope.go:117] "RemoveContainer" containerID="974aa3462e9cda69134179b7906911f488044f8e81e6f2b8ba4712df18b0a6ab"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.186095 4675 generic.go:334] "Generic (PLEG): container finished" podID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" containerID="b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.186169 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerDied","Data":"b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.186212 4675 scope.go:117] "RemoveContainer" containerID="d6f186a95189e2a38ebddc8a2c833e6b10afdb17812bf5904309989b130e47a1"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.186932 4675 scope.go:117] "RemoveContainer" containerID="b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.187289 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.192344 4675 generic.go:334] "Generic (PLEG): container finished" podID="d4608140-77a4-4067-b58e-a95ae2249fea" containerID="0ced35a4cfcc7c09f6e43b2b96dfc7622f7b4cd75e77f2a71654181427fe17dd" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.192493 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerDied","Data":"0ced35a4cfcc7c09f6e43b2b96dfc7622f7b4cd75e77f2a71654181427fe17dd"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.193564 4675 scope.go:117] "RemoveContainer" containerID="0ced35a4cfcc7c09f6e43b2b96dfc7622f7b4cd75e77f2a71654181427fe17dd"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.198194 4675 generic.go:334] "Generic (PLEG): container finished" podID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" containerID="8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.198240 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerDied","Data":"8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.199411 4675 scope.go:117] "RemoveContainer" containerID="8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.199852 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a)\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.201182 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9" event={"ID":"bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48","Type":"ContainerStarted","Data":"e24b0ec7d3469e635180e7223f9fb58da815244aa92669d4e1e9d41bb2a83815"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.202322 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.207325 4675 generic.go:334] "Generic (PLEG): container finished" podID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.207410 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerDied","Data":"a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.208038 4675 scope.go:117] "RemoveContainer" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.208289 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.212344 4675 generic.go:334] "Generic (PLEG): container finished" podID="223d4b40-6f09-41f5-816d-7e82b45b4b90" containerID="d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.212476 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerDied","Data":"d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.213165 4675 scope.go:117] "RemoveContainer" containerID="d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.213621 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90)\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.215417 4675 generic.go:334] "Generic (PLEG): container finished" podID="21978291-afd8-477d-9e86-80a465441902" containerID="0279dc86d896749c7c861424d3a9544c1a53e5573098bae008e9d631ce0f9c6e" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.215527 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerDied","Data":"0279dc86d896749c7c861424d3a9544c1a53e5573098bae008e9d631ce0f9c6e"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.215940 4675 scope.go:117] "RemoveContainer" containerID="0279dc86d896749c7c861424d3a9544c1a53e5573098bae008e9d631ce0f9c6e"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.232853 4675 generic.go:334] "Generic (PLEG): container finished" podID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" containerID="15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.232959 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerDied","Data":"15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.233368 4675 scope.go:117] "RemoveContainer" containerID="15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.233646 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8)\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.243956 4675 generic.go:334] "Generic (PLEG): container finished" podID="fbd303b9-17db-401e-acbf-1ef8219e36df" containerID="06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.244051 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerDied","Data":"06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.244552 4675 scope.go:117] "RemoveContainer" containerID="06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.244917 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df)\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.260216 4675 generic.go:334] "Generic (PLEG): container finished" podID="18941428-e287-4374-93e0-3209cdbbf7d7" containerID="971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.260287 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerDied","Data":"971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.261110 4675 scope.go:117] "RemoveContainer" containerID="971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.261990 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.270089 4675 generic.go:334] "Generic (PLEG): container finished" podID="88da95fd-fdf9-402d-90d8-e742f92cffbb" containerID="490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.270186 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerDied","Data":"490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.271015 4675 scope.go:117] "RemoveContainer" containerID="490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.271375 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb)\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.280918 4675 generic.go:334] "Generic (PLEG): container finished" podID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" containerID="cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.281001 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerDied","Data":"cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.281803 4675 scope.go:117] "RemoveContainer" containerID="cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.282112 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3)\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.292513 4675 generic.go:334] "Generic (PLEG): container finished" podID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" containerID="c25f99e64d7fbe32f3f203d044b95f8902f9cf6351f9de4e3d4b34719d92cfff" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.292638 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerDied","Data":"c25f99e64d7fbe32f3f203d044b95f8902f9cf6351f9de4e3d4b34719d92cfff"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.293466 4675 scope.go:117] "RemoveContainer" containerID="c25f99e64d7fbe32f3f203d044b95f8902f9cf6351f9de4e3d4b34719d92cfff"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.293727 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_openstack-operators(64b432ef-6de9-4d8d-84ce-78f2097bf31e)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.302928 4675 generic.go:334] "Generic (PLEG): container finished" podID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" containerID="140c8f23fe321962ed389be9244bbb312d4f9977c1df359cd469b4616364c1fb" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.303011 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerDied","Data":"140c8f23fe321962ed389be9244bbb312d4f9977c1df359cd469b4616364c1fb"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.303676 4675 scope.go:117] "RemoveContainer" containerID="140c8f23fe321962ed389be9244bbb312d4f9977c1df359cd469b4616364c1fb"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.309526 4675 generic.go:334] "Generic (PLEG): container finished" podID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" containerID="06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.309606 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerDied","Data":"06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.310283 4675 scope.go:117] "RemoveContainer" containerID="06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.310527 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb)\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.317250 4675 generic.go:334] "Generic (PLEG): container finished" podID="e6ff98cd-4075-49dd-b40b-d1923298513e" containerID="af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.317655 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerDied","Data":"af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.318266 4675 scope.go:117] "RemoveContainer" containerID="af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.318627 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e)\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.326091 4675 generic.go:334] "Generic (PLEG): container finished" podID="986b1a58-05d0-4beb-9199-a7564c809455" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.326172 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerDied","Data":"e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.326945 4675 scope.go:117] "RemoveContainer" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26"
Nov 25 13:29:29 crc kubenswrapper[4675]: E1125 13:29:29.327255 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.340654 4675 generic.go:334] "Generic (PLEG): container finished" podID="9495eb50-984d-4069-bd95-719e714b1178" containerID="d341685012329cf60dad004a3d06fdac1ee9a567df53723e123a096aebf13959" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.340745 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerDied","Data":"d341685012329cf60dad004a3d06fdac1ee9a567df53723e123a096aebf13959"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.345583 4675 generic.go:334] "Generic (PLEG): container finished" podID="33456bb6-8430-432c-ac26-1c43307141e3" containerID="55146adbbcdb3f3bf90fcc3100ba2b57d12511164f1f7feceb5b5d0e917350fa" exitCode=1
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.345638 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerDied","Data":"55146adbbcdb3f3bf90fcc3100ba2b57d12511164f1f7feceb5b5d0e917350fa"}
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.346360 4675 scope.go:117] "RemoveContainer" containerID="55146adbbcdb3f3bf90fcc3100ba2b57d12511164f1f7feceb5b5d0e917350fa"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.347837 4675 scope.go:117] "RemoveContainer" containerID="d341685012329cf60dad004a3d06fdac1ee9a567df53723e123a096aebf13959"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.492679 4675 scope.go:117] "RemoveContainer" containerID="45e4f3a968a2c8cab5ba3984005c94977c2d9e1a56138690e5a2607101e378ea"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.540929 4675 scope.go:117] "RemoveContainer" containerID="5812f299f3081490ef9642539720500dc151f5d3dbf453185e0464e66fd1f19e"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.589707 4675 scope.go:117] "RemoveContainer" containerID="4738d8d88de0407838d6011e7e0aa3a8cb6b91f80e567b16bc0e02fb8324d0b7"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.619250 4675 scope.go:117] "RemoveContainer" containerID="990b012da81571e2297f728312722bde123011371a34a3b8401a7d084b744334"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.651449 4675 scope.go:117] "RemoveContainer" containerID="3b3c2e64f3037f4fe2358136d3b2de543be088851ee8c98e61218aeb327ab3a5"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.705863 4675 scope.go:117] "RemoveContainer" containerID="02dfa88b12197fdadfd2aa144f0b9efa359284fda4897a2c6d10709625ce22d3"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.745229 4675 scope.go:117] "RemoveContainer" containerID="061ecdf9aa2d0f61605f4accb5e6b453e9328d3716a9b60260095559670ee9c9"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.783897 4675 scope.go:117] "RemoveContainer" containerID="5fbfece02fe84e009a5d9924115a3d3f5a247d2e50ff39d22cfd53a44aa51915"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.811362 4675 scope.go:117] "RemoveContainer" containerID="bdddf947ea1a6bdc9dafd93f3f61d64e22cef53a665cc93ccb1d99fea80e52bf"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.836160 4675 scope.go:117] "RemoveContainer" containerID="d983778d43e5dd18a742a7903be363e738ff496a4edc1825077bfc8e4e6ec8b1"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.875264 4675 scope.go:117] "RemoveContainer" containerID="6acc2abc8938e97f60eb5f959bb28d00e64d6631a975cd53e288956fa98825e4"
Nov 25 13:29:29 crc kubenswrapper[4675]: I1125 13:29:29.955801 4675 scope.go:117] "RemoveContainer" containerID="4aa80bec7c4faa4ccae0910a81ac4bd6fb51056a689f0a3703ab648a6b275892"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.357024 4675 scope.go:117] "RemoveContainer" containerID="06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.357567 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb)\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.361478 4675 scope.go:117] "RemoveContainer" containerID="971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.361771 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.365938 4675 scope.go:117] "RemoveContainer" containerID="b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.366442 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.372566 4675 generic.go:334] "Generic (PLEG): container finished" podID="21978291-afd8-477d-9e86-80a465441902" containerID="34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.372639 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerDied","Data":"34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.372694 4675 scope.go:117] "RemoveContainer" containerID="0279dc86d896749c7c861424d3a9544c1a53e5573098bae008e9d631ce0f9c6e"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.373352 4675 scope.go:117] "RemoveContainer" containerID="34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.373630 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-75cf7cf5cb-gbbjk_openstack-operators(21978291-afd8-477d-9e86-80a465441902)\"" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podUID="21978291-afd8-477d-9e86-80a465441902"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.378043 4675 generic.go:334] "Generic (PLEG): container finished" podID="d4608140-77a4-4067-b58e-a95ae2249fea" containerID="7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.378097 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerDied","Data":"7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.378468 4675 scope.go:117] "RemoveContainer" containerID="7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.378911 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79d658b66d-dls9t_openstack-operators(d4608140-77a4-4067-b58e-a95ae2249fea)\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podUID="d4608140-77a4-4067-b58e-a95ae2249fea"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.385630 4675 scope.go:117] "RemoveContainer" containerID="8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.385956 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a)\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.389992 4675 scope.go:117] "RemoveContainer" containerID="15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.390242 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8)\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.399200 4675 scope.go:117] "RemoveContainer" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.399493 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.407004 4675 scope.go:117] "RemoveContainer" containerID="06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.407272 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df)\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.410426 4675 generic.go:334] "Generic (PLEG): container finished" podID="9495eb50-984d-4069-bd95-719e714b1178" containerID="7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.410495 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerDied","Data":"7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.411160 4675 scope.go:117] "RemoveContainer" containerID="7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.411443 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178)\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.417111 4675 scope.go:117] "RemoveContainer" containerID="d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.417363 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90)\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.421880 4675 generic.go:334] "Generic (PLEG): container finished" podID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" containerID="edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.421950 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerDied","Data":"edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.422588 4675 scope.go:117] "RemoveContainer" containerID="edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.422948 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-7d5d9fd47f-n6gqt_openstack-operators(51b6ef4f-14c9-4c56-b374-3183ccd5cacb)\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podUID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.426958 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fa6f393-fc29-4035-81da-a9965421c77f" containerID="79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.427023 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerDied","Data":"79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.428444 4675 scope.go:117] "RemoveContainer" containerID="79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.429342 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f)\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.434174 4675 scope.go:117] "RemoveContainer" containerID="af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.434420 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e)\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.436774 4675 generic.go:334] "Generic (PLEG): container finished" podID="33456bb6-8430-432c-ac26-1c43307141e3" containerID="a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.437083 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerDied","Data":"a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.438192 4675 scope.go:117] "RemoveContainer" containerID="a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.438546 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.444054 4675 scope.go:117] "RemoveContainer" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.444341 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.452140 4675 scope.go:117] "RemoveContainer" containerID="490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.452436 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb)\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.456242 4675 scope.go:117] "RemoveContainer" containerID="0ced35a4cfcc7c09f6e43b2b96dfc7622f7b4cd75e77f2a71654181427fe17dd"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.460221 4675 generic.go:334] "Generic (PLEG): container finished" podID="a271eb36-50fc-40c6-8885-f97f281c1150" containerID="e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa" exitCode=1
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.460363 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerDied","Data":"e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa"}
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.461136 4675 scope.go:117] "RemoveContainer" containerID="e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.461483 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150)\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.472698 4675 scope.go:117] "RemoveContainer" containerID="cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7"
Nov 25 13:29:30 crc kubenswrapper[4675]: E1125 13:29:30.472985 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3)\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.516254 4675 scope.go:117] "RemoveContainer" containerID="d341685012329cf60dad004a3d06fdac1ee9a567df53723e123a096aebf13959"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.571446 4675 scope.go:117] "RemoveContainer" containerID="140c8f23fe321962ed389be9244bbb312d4f9977c1df359cd469b4616364c1fb"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.581443 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.581516 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.582273 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.582320 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" containerName="kube-state-metrics" containerID="cri-o://3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5" gracePeriod=30
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.630031 4675 scope.go:117] "RemoveContainer" containerID="974aa3462e9cda69134179b7906911f488044f8e81e6f2b8ba4712df18b0a6ab"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.686361 4675 scope.go:117] "RemoveContainer" containerID="55146adbbcdb3f3bf90fcc3100ba2b57d12511164f1f7feceb5b5d0e917350fa"
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.700475 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 13:29:30 crc kubenswrapper[4675]: I1125 13:29:30.718763 4675 scope.go:117] "RemoveContainer" containerID="8f657c44b138f2f212bb851cc2ddcc13d1c7e6b111f3d6d1b365b9fa1ae17bbd"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.497100 4675 generic.go:334] "Generic (PLEG): container finished" podID="76b55738-1ee0-41a4-950a-faa08432f67f" containerID="3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5" exitCode=2
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.498053 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerDied","Data":"3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5"}
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.498111 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerStarted","Data":"88dbe59f0d8744ec24f0a2fb54e50cb1688622df0f498f6b01867721ee137af6"}
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.498735 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.542787 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.543525 4675 scope.go:117] "RemoveContainer" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4"
Nov 25 13:29:31 crc kubenswrapper[4675]: E1125 13:29:31.543743 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.782379 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.783336 4675 scope.go:117] "RemoveContainer" containerID="edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a"
Nov 25 13:29:31 crc kubenswrapper[4675]: E1125 13:29:31.783683 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-7d5d9fd47f-n6gqt_openstack-operators(51b6ef4f-14c9-4c56-b374-3183ccd5cacb)\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podUID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.941193 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx"
Nov 25 13:29:31 crc kubenswrapper[4675]: I1125 13:29:31.941835 4675 scope.go:117] "RemoveContainer" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26"
Nov 25 13:29:31 crc kubenswrapper[4675]: E1125 13:29:31.942090 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.079723 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.080455 4675 scope.go:117] "RemoveContainer" containerID="a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.080799 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.278466 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.279258 4675 scope.go:117] "RemoveContainer" containerID="7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.279648 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79d658b66d-dls9t_openstack-operators(d4608140-77a4-4067-b58e-a95ae2249fea)\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podUID="d4608140-77a4-4067-b58e-a95ae2249fea"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.434895 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.435776 4675 scope.go:117] "RemoveContainer" containerID="7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.436100 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178)\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.512924 4675 generic.go:334] "Generic (PLEG): container finished" podID="76b55738-1ee0-41a4-950a-faa08432f67f" containerID="88dbe59f0d8744ec24f0a2fb54e50cb1688622df0f498f6b01867721ee137af6" exitCode=1
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.512982 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerDied","Data":"88dbe59f0d8744ec24f0a2fb54e50cb1688622df0f498f6b01867721ee137af6"}
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.513051 4675 scope.go:117] "RemoveContainer" containerID="3a5e8646ac9dd8b63a9626924afdcc0168e8ea27d15bda32348f537ecbeda5d5"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.513624 4675 scope.go:117] "RemoveContainer" containerID="88dbe59f0d8744ec24f0a2fb54e50cb1688622df0f498f6b01867721ee137af6"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.592170 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.593183 4675 scope.go:117] "RemoveContainer" containerID="79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.593409 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f)\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.615423 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.616019 4675 scope.go:117] "RemoveContainer" containerID="ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.616311 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7)\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.886942 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.887864 4675 scope.go:117] "RemoveContainer" containerID="e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa"
Nov 25 13:29:32 crc kubenswrapper[4675]: E1125 13:29:32.888382 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150)\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150"
Nov 25 13:29:32 crc kubenswrapper[4675]: I1125 13:29:32.971584 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-rkgfz"
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.525672 4675 generic.go:334] "Generic (PLEG): container finished" podID="76b55738-1ee0-41a4-950a-faa08432f67f" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958" exitCode=1
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.525755 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerDied","Data":"29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958"}
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.525837 4675 scope.go:117] "RemoveContainer" containerID="88dbe59f0d8744ec24f0a2fb54e50cb1688622df0f498f6b01867721ee137af6"
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.527117 4675 scope.go:117] "RemoveContainer" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958"
Nov 25 13:29:33 crc kubenswrapper[4675]: E1125 13:29:33.527582 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(76b55738-1ee0-41a4-950a-faa08432f67f)\"" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f"
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.740947 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk"
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.741964 4675 scope.go:117] "RemoveContainer" containerID="34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854"
Nov 25 13:29:33 crc kubenswrapper[4675]: E1125 13:29:33.742257 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-75cf7cf5cb-gbbjk_openstack-operators(21978291-afd8-477d-9e86-80a465441902)\"" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podUID="21978291-afd8-477d-9e86-80a465441902"
Nov 25 13:29:33 crc kubenswrapper[4675]: I1125 13:29:33.867476 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-xxcn9"
Nov 25 13:29:34 crc kubenswrapper[4675]: I1125 13:29:34.532786 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d"
Nov 25 13:29:34 crc kubenswrapper[4675]: E1125 13:29:34.533224 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:29:34 crc kubenswrapper[4675]: I1125 13:29:34.540069 4675 scope.go:117] "RemoveContainer" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958"
Nov 25 13:29:34 crc kubenswrapper[4675]: E1125 13:29:34.540434 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(76b55738-1ee0-41a4-950a-faa08432f67f)\"" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.226956 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.306362 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.428197 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.559599 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.699378 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.732540 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.908092 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 25 13:29:35 crc kubenswrapper[4675]: I1125 13:29:35.979555 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.038655 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.087008 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.099342 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.213565 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.303088 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.357086 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.378016 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.398226 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.532874 4675 scope.go:117] "RemoveContainer" containerID="647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.638671 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-pm2zh"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.663904 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.687594 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.700539 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.903610 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.926831 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 25 13:29:36 crc kubenswrapper[4675]: I1125 13:29:36.974564 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 25 13:29:36 crc kubenswrapper[4675]: E1125 13:29:36.979762 4675 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb100a90_931c_4daa_8466_49a1ae50185b.slice/crio-245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb100a90_931c_4daa_8466_49a1ae50185b.slice/crio-conmon-245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.023536 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.135869 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.242182 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.265856 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.325561 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.358934 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.400253 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.504822 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-p9gx9"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.515121 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.568336 4675 generic.go:334] "Generic (PLEG): container finished" podID="eb100a90-931c-4daa-8466-49a1ae50185b" containerID="245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad" exitCode=1
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.568374 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerDied","Data":"245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad"}
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.568405 4675 scope.go:117] "RemoveContainer" containerID="647a4c8d5b731f44a664f2a451a583ec784832ee99a0b903b10670d6eb329997"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.569046 4675 scope.go:117] "RemoveContainer" containerID="245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad"
Nov 25 13:29:37 crc kubenswrapper[4675]: E1125 13:29:37.569268 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-5db469f446-85gxv_metallb-system(eb100a90-931c-4daa-8466-49a1ae50185b)\"" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" podUID="eb100a90-931c-4daa-8466-49a1ae50185b"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.687039 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-w5kl7"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.688880 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.699976 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.785616 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.852684 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 13:29:37 crc kubenswrapper[4675]: I1125 13:29:37.964113 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wgcdt"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.088286 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.207479 4675 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.275284 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.281192 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.330558 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.425353 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.449528 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.457197 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.464231 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.469187 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-b6rw9"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.478894 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.488179 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.500609 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.534402 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.566052 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.603385 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.686854 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.699801 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.794522 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.808978 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.815185 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.830328 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.832369 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.862713 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 25 13:29:38 crc kubenswrapper[4675]: I1125 13:29:38.951984 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.020442 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.020653 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.020895 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.075219 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.075875 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.097792 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.138448 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.145210 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8gfpq"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.186307 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.202239 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.236131 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-8qmg9"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.242584 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.283364 4675 reflector.go:368] Caches populated for *v1.ConfigMap from
object-"openstack"/"ovncontroller-scripts" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.351805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.419902 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.438161 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.497089 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.498203 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.498988 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.517802 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.556937 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5dqnb" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.571097 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.627321 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.646969 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.649430 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.705549 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.746208 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.747990 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.767510 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.770274 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-t8rlm" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.781038 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.801962 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 13:29:39 crc kubenswrapper[4675]: 
I1125 13:29:39.863752 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.885252 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.891075 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-77b99896c6-rz556" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.924324 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.928988 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.944490 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.944640 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.948600 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.959909 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 13:29:39 crc kubenswrapper[4675]: I1125 13:29:39.977598 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.087214 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.156109 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.169325 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.248565 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.283222 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.290513 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.291700 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-hvkvf" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.314892 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.363564 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" 
Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.393961 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.409839 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.410718 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.438450 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.461336 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.509166 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.528433 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.534368 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.558456 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.574911 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.575024 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.575646 4675 scope.go:117] "RemoveContainer" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958" Nov 25 13:29:40 crc kubenswrapper[4675]: E1125 13:29:40.575879 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(76b55738-1ee0-41a4-950a-faa08432f67f)\"" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.593370 4675 scope.go:117] "RemoveContainer" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958" Nov 25 13:29:40 crc kubenswrapper[4675]: E1125 13:29:40.593607 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(76b55738-1ee0-41a4-950a-faa08432f67f)\"" pod="openstack/kube-state-metrics-0" podUID="76b55738-1ee0-41a4-950a-faa08432f67f" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.597510 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.691367 4675 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.722071 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-thhbf" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.766573 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.816432 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-rsjq2" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.849542 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 13:29:40 crc kubenswrapper[4675]: I1125 13:29:40.928393 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.027386 4675 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.035348 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.102680 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.135149 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.180805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.181138 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.222389 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.225299 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-snq9n" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.293239 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.301645 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hhzwc" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.306132 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.367060 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.382695 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.449532 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 13:29:41 crc 
kubenswrapper[4675]: I1125 13:29:41.475584 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.506380 4675 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.532717 4675 scope.go:117] "RemoveContainer" containerID="b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.532936 4675 scope.go:117] "RemoveContainer" containerID="971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.533043 4675 scope.go:117] "RemoveContainer" containerID="c25f99e64d7fbe32f3f203d044b95f8902f9cf6351f9de4e3d4b34719d92cfff" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.533429 4675 scope.go:117] "RemoveContainer" containerID="06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.543105 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.543697 4675 scope.go:117] "RemoveContainer" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.555727 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.559104 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.559849 4675 scope.go:117] "RemoveContainer" containerID="cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.575281 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.613952 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.614580 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.628064 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.650715 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dkh98" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.654924 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.671805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.701448 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 
13:29:41.702278 4675 scope.go:117] "RemoveContainer" containerID="d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.738927 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.741288 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.760957 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.781810 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.782515 4675 scope.go:117] "RemoveContainer" containerID="edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.783320 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.817461 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.836804 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.838582 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.857252 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.912780 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-kx8h9" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.915359 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.940302 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.941651 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.942482 4675 scope.go:117] "RemoveContainer" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.946440 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.947731 4675 scope.go:117] "RemoveContainer" containerID="8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.974185 4675 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.989919 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.992998 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.993735 4675 scope.go:117] "RemoveContainer" containerID="06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d" Nov 25 13:29:41 crc kubenswrapper[4675]: I1125 13:29:41.996918 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-qlfqc" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.003740 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.042512 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.080549 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.082259 4675 scope.go:117] "RemoveContainer" containerID="a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.084112 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.147981 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.149374 4675 scope.go:117] "RemoveContainer" containerID="af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.153712 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.163601 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.232272 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.232968 4675 scope.go:117] "RemoveContainer" containerID="15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.239399 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.256241 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.278013 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.278832 4675 
scope.go:117] "RemoveContainer" containerID="7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.293426 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.392571 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-sfbl5" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.410778 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-72lbr" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.435042 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.436289 4675 scope.go:117] "RemoveContainer" containerID="7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.473121 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.496719 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.509380 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.510980 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.511526 4675 scope.go:117] "RemoveContainer" containerID="490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.517461 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-ff2p8" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.573893 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.592004 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.592759 4675 scope.go:117] "RemoveContainer" containerID="79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.615407 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.617302 4675 scope.go:117] "RemoveContainer" containerID="ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.627159 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.630530 4675 generic.go:334] "Generic (PLEG): container finished" podID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" 
containerID="d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.630606 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerDied","Data":"d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.630660 4675 scope.go:117] "RemoveContainer" containerID="cd1798caad237c62239c80871d788a2e92adb02e264cb1afe2280ec513be8df7" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.631369 4675 scope.go:117] "RemoveContainer" containerID="d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.631704 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3)\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.633566 4675 generic.go:334] "Generic (PLEG): container finished" podID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" containerID="ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.633621 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerDied","Data":"ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.634123 4675 scope.go:117] "RemoveContainer" containerID="ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.634398 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a)\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.638958 4675 generic.go:334] "Generic (PLEG): container finished" podID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" containerID="e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.639022 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerDied","Data":"e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.639666 4675 scope.go:117] "RemoveContainer" containerID="e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.639939 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=manager pod=watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb)\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.645458 4675 generic.go:334] "Generic (PLEG): container finished" podID="fbd303b9-17db-401e-acbf-1ef8219e36df" containerID="a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.645550 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerDied","Data":"a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.651386 4675 generic.go:334] "Generic (PLEG): container finished" podID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" containerID="3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.651493 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerDied","Data":"3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.652088 4675 scope.go:117] "RemoveContainer" containerID="3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.653480 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.653591 4675 scope.go:117] "RemoveContainer" containerID="a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.653956 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df)\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.655302 4675 generic.go:334] "Generic (PLEG): container finished" podID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.655565 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerDied","Data":"c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.655756 4675 scope.go:117] "RemoveContainer" 
containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.656072 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.658491 4675 generic.go:334] "Generic (PLEG): container finished" podID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" containerID="713b7c11be9183ed3f6f00dede9735971405a82a8e9bdc0b67921617fc67e678" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.658556 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerDied","Data":"713b7c11be9183ed3f6f00dede9735971405a82a8e9bdc0b67921617fc67e678"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.659057 4675 scope.go:117] "RemoveContainer" containerID="713b7c11be9183ed3f6f00dede9735971405a82a8e9bdc0b67921617fc67e678" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.659712 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_openstack-operators(64b432ef-6de9-4d8d-84ce-78f2097bf31e)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.661123 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.668112 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerStarted","Data":"a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.669997 4675 scope.go:117] "RemoveContainer" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.670789 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.683544 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.706310 4675 generic.go:334] "Generic (PLEG): container finished" podID="223d4b40-6f09-41f5-816d-7e82b45b4b90" containerID="cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: 
I1125 13:29:42.706420 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerDied","Data":"cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.707219 4675 scope.go:117] "RemoveContainer" containerID="cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.707495 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90)\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.714385 4675 generic.go:334] "Generic (PLEG): container finished" podID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" containerID="b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.714490 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerDied","Data":"b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.715282 4675 scope.go:117] "RemoveContainer" containerID="b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.715570 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-7d5d9fd47f-n6gqt_openstack-operators(51b6ef4f-14c9-4c56-b374-3183ccd5cacb)\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podUID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.735996 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.736010 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.737090 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.738997 4675 scope.go:117] "RemoveContainer" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.739344 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" 
podUID="986b1a58-05d0-4beb-9199-a7564c809455" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.747875 4675 generic.go:334] "Generic (PLEG): container finished" podID="18941428-e287-4374-93e0-3209cdbbf7d7" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" exitCode=1 Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.747922 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerDied","Data":"9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe"} Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.748805 4675 scope.go:117] "RemoveContainer" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.749184 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.765436 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.783173 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.788951 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.800158 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.800186 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.834236 4675 scope.go:117] "RemoveContainer" containerID="8c6ae87d8dedebc72578a4a9b3117520308a93bf0e8c99f85e9bf48ee02982ad" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.851747 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.883852 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.885911 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rmch4" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.887029 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.888000 4675 scope.go:117] "RemoveContainer" containerID="e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.888035 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 13:29:42 crc 
kubenswrapper[4675]: I1125 13:29:42.920886 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.924003 4675 scope.go:117] "RemoveContainer" containerID="06c7bcdc7bece7849be5fdaa79d8ce97821c792ab432094979635b0f771d4309" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.955232 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.982269 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 13:29:42 crc kubenswrapper[4675]: I1125 13:29:42.983030 4675 scope.go:117] "RemoveContainer" containerID="245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad" Nov 25 13:29:42 crc kubenswrapper[4675]: E1125 13:29:42.983410 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-5db469f446-85gxv_metallb-system(eb100a90-931c-4daa-8466-49a1ae50185b)\"" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.067726 4675 scope.go:117] "RemoveContainer" containerID="06004f8890c87cdbf438b1d1dac2fe09313881516cab89f7bbcad522171e786d" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.078510 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.078775 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.094351 4675 scope.go:117] "RemoveContainer" containerID="a5c7f8df55beb3046b38b28bfc20e166c03ff50f8cde534ba0c23e78c9f65ad4" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.111648 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.116038 4675 scope.go:117] "RemoveContainer" containerID="b1117e983aeb122641b2e498dea2c4e79e5a86c2c9d9937c3fba1faff7436d24" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.126483 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.141423 4675 scope.go:117] "RemoveContainer" containerID="c25f99e64d7fbe32f3f203d044b95f8902f9cf6351f9de4e3d4b34719d92cfff" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.175377 4675 scope.go:117] "RemoveContainer" containerID="d67984e63d54904c9545eb4072aac0b2f4b42c74625312f1cf8744dcacb0c5ef" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.186771 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.199186 4675 scope.go:117] "RemoveContainer" containerID="edbdb16202724f2b7263ecacd6ef728f8b778aa1a0995ce670fb63e1b21fab9a" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.217495 4675 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.220653 4675 scope.go:117] "RemoveContainer" containerID="971e3e2137148b7b56489592c513ac64d8360ddab794504ac9907429c59f6ed1" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.225623 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.238903 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.239023 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.247777 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.283663 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.308452 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hdnd8" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.379116 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.443202 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gdw6g" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.493409 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.523030 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.537666 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.545658 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.616960 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.669895 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.690675 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.720881 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.740634 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.742503 4675 scope.go:117] "RemoveContainer" 
containerID="34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.759311 4675 generic.go:334] "Generic (PLEG): container finished" podID="e6ff98cd-4075-49dd-b40b-d1923298513e" containerID="82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.759380 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerDied","Data":"82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.759421 4675 scope.go:117] "RemoveContainer" containerID="af5a1ab459fd5f864e914e4b5e6240d379c8ab964318866891811db89d563ef6" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.760092 4675 scope.go:117] "RemoveContainer" containerID="82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.760361 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e)\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.763443 4675 generic.go:334] "Generic (PLEG): container finished" podID="33456bb6-8430-432c-ac26-1c43307141e3" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.763531 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerDied","Data":"a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.764331 4675 scope.go:117] "RemoveContainer" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.764595 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.777667 4675 scope.go:117] "RemoveContainer" containerID="e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.777919 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb)\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.786539 4675 generic.go:334] "Generic (PLEG): 
container finished" podID="a271eb36-50fc-40c6-8885-f97f281c1150" containerID="3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.786601 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerDied","Data":"3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.787204 4675 scope.go:117] "RemoveContainer" containerID="3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.787423 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150)\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.792444 4675 generic.go:334] "Generic (PLEG): container finished" podID="6fa6f393-fc29-4035-81da-a9965421c77f" containerID="74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.792506 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerDied","Data":"74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.793124 4675 scope.go:117] "RemoveContainer" containerID="74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.793363 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f)\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.800755 4675 scope.go:117] "RemoveContainer" containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.801009 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.808978 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.816344 4675 generic.go:334] "Generic (PLEG): container finished" podID="986b1a58-05d0-4beb-9199-a7564c809455" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" exitCode=1 Nov 25 13:29:43 crc 
kubenswrapper[4675]: I1125 13:29:43.816415 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerDied","Data":"c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.817075 4675 scope.go:117] "RemoveContainer" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.817338 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.820421 4675 generic.go:334] "Generic (PLEG): container finished" podID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" containerID="c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.820514 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerDied","Data":"c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.821678 4675 scope.go:117] "RemoveContainer" containerID="c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.822567 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8)\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.832227 4675 scope.go:117] "RemoveContainer" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.832618 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.839035 4675 generic.go:334] "Generic (PLEG): container finished" podID="e8f46595-6a0c-4b55-9839-3360395606f7" containerID="eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.839128 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerDied","Data":"eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f"} Nov 25 13:29:43 crc 
kubenswrapper[4675]: I1125 13:29:43.840012 4675 scope.go:117] "RemoveContainer" containerID="eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.840335 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7)\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.854716 4675 generic.go:334] "Generic (PLEG): container finished" podID="88da95fd-fdf9-402d-90d8-e742f92cffbb" containerID="e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.854774 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerDied","Data":"e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.855240 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.855386 4675 scope.go:117] "RemoveContainer" containerID="e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.855604 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb)\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.864881 4675 generic.go:334] "Generic (PLEG): container finished" podID="9495eb50-984d-4069-bd95-719e714b1178" containerID="e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.864947 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerDied","Data":"e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.865491 4675 scope.go:117] "RemoveContainer" containerID="e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.865753 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178)\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.869151 4675 generic.go:334] "Generic (PLEG): container finished" podID="d4608140-77a4-4067-b58e-a95ae2249fea" 
containerID="3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e" exitCode=1 Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.869179 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerDied","Data":"3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e"} Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.869661 4675 scope.go:117] "RemoveContainer" containerID="3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e" Nov 25 13:29:43 crc kubenswrapper[4675]: E1125 13:29:43.869941 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79d658b66d-dls9t_openstack-operators(d4608140-77a4-4067-b58e-a95ae2249fea)\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podUID="d4608140-77a4-4067-b58e-a95ae2249fea" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.872583 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.875887 4675 scope.go:117] "RemoveContainer" containerID="a7233ceb8b597b421fe4d73266ae2096211b90e6649de32beb5c9a37edce5280" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.930829 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.944936 4675 scope.go:117] "RemoveContainer" containerID="e2a3f9a6acad01f6278ff665aea77074e91a4a52a1d2000fb1ea9b0300dee1fa" Nov 25 13:29:43 crc kubenswrapper[4675]: I1125 13:29:43.976265 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.007443 4675 scope.go:117] "RemoveContainer" containerID="79eb90bb43f3655d9f1a5749e4cbfab0c2333f75b89101ae1c92a6619187b249" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.033433 4675 scope.go:117] "RemoveContainer" containerID="e74c1b027e6dee47e6fad52567900e44abed742700541865c92a16de8df3de26" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.055402 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.071344 4675 scope.go:117] "RemoveContainer" containerID="15c25a88718c438a6b77ca6cdc7a5f43ffabe076c9a153ad679180fded5828ad" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.086637 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bxzgq" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.087521 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.098141 4675 scope.go:117] "RemoveContainer" containerID="ca926bb3444d9cd246ffb0b71329e26500e9b9408172b2a58f2ca30aac249285" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.119696 4675 scope.go:117] "RemoveContainer" containerID="490b14cdad7ef8a6483abd3c5bc80d39ec66f513d8a806987718e02e86fe933f" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.140801 4675 scope.go:117] 
"RemoveContainer" containerID="7d5f9c89a9fdd89d749deaf6b4668a98e31c021631b95e9e9afa4c623081be60" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.164468 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.164929 4675 scope.go:117] "RemoveContainer" containerID="7ffafbfd24b0488a8eeb0651d2e1768f3a63196f0ba98d2212c37acb88c84488" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.170856 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-2wl6d" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.205664 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.206188 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.286939 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.316411 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.426422 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.431722 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-ts4mt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.456968 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-qxk2b" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.530547 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.532890 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.553884 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.556421 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.567651 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.639502 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.662316 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-v54zb" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.689153 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-9w8kl" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.701833 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 13:29:44 
crc kubenswrapper[4675]: I1125 13:29:44.731428 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.749142 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.755731 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.760048 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.770429 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.770725 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.802072 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.825329 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.876627 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.885028 4675 generic.go:334] "Generic (PLEG): container finished" podID="21978291-afd8-477d-9e86-80a465441902" containerID="880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160" exitCode=1 Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.885098 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerDied","Data":"880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160"} Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.885145 4675 scope.go:117] "RemoveContainer" containerID="34b0fbc58cdab1e01348a02d3533c474c419decbe778e24cf622cf41c8558854" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.885798 4675 scope.go:117] "RemoveContainer" containerID="880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160" Nov 25 13:29:44 crc kubenswrapper[4675]: E1125 13:29:44.886204 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-75cf7cf5cb-gbbjk_openstack-operators(21978291-afd8-477d-9e86-80a465441902)\"" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podUID="21978291-afd8-477d-9e86-80a465441902" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.931203 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.962485 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 13:29:44 crc kubenswrapper[4675]: I1125 13:29:44.998106 4675 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-7qmsf" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.000695 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.123763 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.149064 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pngsz" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.158875 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.252581 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.304076 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-f9579" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.314536 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.331482 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.355344 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.466956 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-h7b69" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.481951 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.525497 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.544935 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.559442 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:29:45 crc kubenswrapper[4675]: E1125 13:29:45.559989 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.613448 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-fqkwn" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.635754 4675 reflector.go:368] Caches populated for *v1.Secret 
from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.636050 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.681193 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.715601 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.720728 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.727864 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.753450 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.758168 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-zb75l" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.763093 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.772638 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.786324 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.867801 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.896878 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.905448 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.928211 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xgbxq" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.929909 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.947372 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.957345 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.979936 4675 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.997351 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-pfpzw" Nov 25 13:29:45 crc kubenswrapper[4675]: I1125 13:29:45.998473 4675 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.006994 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.022464 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.025565 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.087191 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-kk54k" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.111132 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.111483 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.200421 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.208007 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.246061 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.256222 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-b4krb" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.266937 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.273610 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.276448 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.329310 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.426595 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-sh2db" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.470377 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.479100 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.485647 4675 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-57d5w" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.522635 4675 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-qcbwl" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.534351 4675 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.549275 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=42.549258275 podStartE2EDuration="42.549258275s" podCreationTimestamp="2025-11-25 13:29:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:29:25.130646844 +0000 UTC m=+3710.302239195" watchObservedRunningTime="2025-11-25 13:29:46.549258275 +0000 UTC m=+3731.720850616" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.552202 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.552250 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.566079 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.573276 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.575513 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.585370 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=21.585354777 podStartE2EDuration="21.585354777s" podCreationTimestamp="2025-11-25 13:29:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:29:46.577040064 +0000 UTC m=+3731.748632405" watchObservedRunningTime="2025-11-25 13:29:46.585354777 +0000 UTC m=+3731.756947128" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.641120 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.655311 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.666507 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.779391 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.793365 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-rqnn6" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.832618 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-s8rdk" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.865047 4675 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.865756 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.869774 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.890121 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.926000 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.935443 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.942302 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.945840 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.946343 4675 generic.go:334] "Generic (PLEG): container finished" podID="ff650df4-ed32-43ee-99cf-25ea4d4b55d8" containerID="70cfe4a9a11556d0b69afff67a3fa2a0e632fd4d1453a16ed990f838b5aa0f40" exitCode=1 Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.946551 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6lknr" event={"ID":"ff650df4-ed32-43ee-99cf-25ea4d4b55d8","Type":"ContainerDied","Data":"70cfe4a9a11556d0b69afff67a3fa2a0e632fd4d1453a16ed990f838b5aa0f40"} Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.947335 4675 scope.go:117] "RemoveContainer" containerID="70cfe4a9a11556d0b69afff67a3fa2a0e632fd4d1453a16ed990f838b5aa0f40" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.952976 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.964492 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.972683 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 13:29:46 crc kubenswrapper[4675]: I1125 13:29:46.981715 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.004183 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.032063 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.067454 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.100526 4675 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.150207 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-c2rs6" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.271341 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.404048 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.406862 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-69vb7" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.505181 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.536033 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.538774 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.621206 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.622615 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.647558 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.651774 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.714612 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.731689 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.739268 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.833248 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.844967 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6mcdr" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.864446 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.880932 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.886667 4675 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.917771 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.939890 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-z7zkg" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.955936 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-6lknr" event={"ID":"ff650df4-ed32-43ee-99cf-25ea4d4b55d8","Type":"ContainerStarted","Data":"10a1595fb081c80edf5c3604b8f767332adfc17ce4aff2c235f15125d142524c"} Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.958631 4675 generic.go:334] "Generic (PLEG): container finished" podID="9624922e-a281-4931-97a5-47ae5c1e78f4" containerID="aebaf16aba45415c792394b8ff00c09669419d73520fe51a027f50750ac30743" exitCode=1 Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.959499 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" event={"ID":"9624922e-a281-4931-97a5-47ae5c1e78f4","Type":"ContainerDied","Data":"aebaf16aba45415c792394b8ff00c09669419d73520fe51a027f50750ac30743"} Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.959795 4675 scope.go:117] "RemoveContainer" containerID="aebaf16aba45415c792394b8ff00c09669419d73520fe51a027f50750ac30743" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.974654 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.996206 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 13:29:47 crc kubenswrapper[4675]: I1125 13:29:47.998583 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.037153 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-tc796" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.055598 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.098268 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.098898 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.104000 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.114134 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-sqjxf" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.177483 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.247535 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 
13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.261610 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-6tx6c" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.311736 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-px4x4" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.351614 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.364254 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.506674 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.507941 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.571073 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-rlth7" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.726932 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.756999 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.777159 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.808165 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.850172 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.853391 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.853847 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.868397 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.908612 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.953570 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-cfxt2" Nov 25 13:29:48 crc kubenswrapper[4675]: I1125 13:29:48.969437 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvcg9" event={"ID":"9624922e-a281-4931-97a5-47ae5c1e78f4","Type":"ContainerStarted","Data":"33870fc3ebf1b9ca20dd686e97afa4afd0fe853c39ad9ad1bc2586474ce88381"} Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.075538 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 
13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.090238 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.180271 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.186289 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.198211 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.240931 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.253389 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.267847 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.276355 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.343532 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.347936 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.443484 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.454066 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.507222 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.619449 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.739748 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.770489 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.800451 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-hv9m7" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.896200 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 13:29:49 crc kubenswrapper[4675]: I1125 13:29:49.922693 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-fl8vx" Nov 25 13:29:50 crc 
kubenswrapper[4675]: I1125 13:29:50.088162 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.101636 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.296885 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.374439 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.425513 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.449587 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.532392 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.555096 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.596982 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.636586 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.655302 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.708480 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.755442 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.784138 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.831638 4675 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.894098 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.932768 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 13:29:50 crc kubenswrapper[4675]: I1125 13:29:50.983661 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.075046 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.117885 4675 reflector.go:368] Caches populated for 
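The "SyncLoop (probe)" entries in this log, readiness status flipping to "" on a crash and back to "ready" after a successful restart, are the kubelet reacting to probe results on the operator manager containers. For reference, a readiness probe like the ones these controller-runtime managers typically declare; the /readyz endpoint, port 8081, and the timing values are the common kubebuilder defaults, assumed here rather than extracted from these pods' specs:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// A readiness probe comparable to what a controller-runtime "manager"
	// container usually carries (assumed defaults, not read from the log).
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/readyz",
				Port: intstr.FromInt(8081),
			},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
		FailureThreshold:    3,
	}
	fmt.Printf("readiness probe: GET %s on port %s every %ds\n",
		probe.HTTPGet.Path, probe.HTTPGet.Port.String(), probe.PeriodSeconds)
}
```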
*v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.173030 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.263722 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.271333 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.283546 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.295720 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.398252 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.412001 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.488861 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-nt2qj" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.529202 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.543376 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.544109 4675 scope.go:117] "RemoveContainer" containerID="3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.544366 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.558268 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.558771 4675 scope.go:117] "RemoveContainer" containerID="d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.559015 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3)\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" Nov 25 13:29:51 crc 
kubenswrapper[4675]: I1125 13:29:51.660522 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.701457 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.702172 4675 scope.go:117] "RemoveContainer" containerID="cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.702394 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90)\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.782049 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.782754 4675 scope.go:117] "RemoveContainer" containerID="b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.783037 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-7d5d9fd47f-n6gqt_openstack-operators(51b6ef4f-14c9-4c56-b374-3183ccd5cacb)\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podUID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.837891 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.838636 4675 scope.go:117] "RemoveContainer" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.839039 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.941424 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.942272 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.943124 4675 scope.go:117] "RemoveContainer" containerID="ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.943289 4675 scope.go:117] "RemoveContainer" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" Nov 
25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.943423 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a)\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.943747 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.993137 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:29:51 crc kubenswrapper[4675]: I1125 13:29:51.994108 4675 scope.go:117] "RemoveContainer" containerID="a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a" Nov 25 13:29:51 crc kubenswrapper[4675]: E1125 13:29:51.994478 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df)\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.020684 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.074332 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.079897 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.080582 4675 scope.go:117] "RemoveContainer" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.080804 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.147671 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.148581 4675 scope.go:117] "RemoveContainer" containerID="82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.148921 4675 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e)\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.233031 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.233742 4675 scope.go:117] "RemoveContainer" containerID="c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.234030 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8)\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.239038 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.239776 4675 scope.go:117] "RemoveContainer" containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.240071 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.278152 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.278621 4675 scope.go:117] "RemoveContainer" containerID="3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.278849 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79d658b66d-dls9t_openstack-operators(d4608140-77a4-4067-b58e-a95ae2249fea)\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podUID="d4608140-77a4-4067-b58e-a95ae2249fea" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.435101 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.435927 4675 scope.go:117] "RemoveContainer" containerID="e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.436208 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178)\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.510914 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.511893 4675 scope.go:117] "RemoveContainer" containerID="e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.512183 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb)\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.591605 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.592380 4675 scope.go:117] "RemoveContainer" containerID="74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.592650 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f)\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.615619 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.616584 4675 scope.go:117] "RemoveContainer" containerID="eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.616953 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7)\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.887457 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.888270 4675 scope.go:117] "RemoveContainer" containerID="3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562" Nov 25 13:29:52 crc kubenswrapper[4675]: E1125 13:29:52.888570 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=manager pod=ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150)\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.912795 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 13:29:52 crc kubenswrapper[4675]: I1125 13:29:52.955098 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 13:29:53 crc kubenswrapper[4675]: I1125 13:29:53.508978 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 13:29:53 crc kubenswrapper[4675]: I1125 13:29:53.532606 4675 scope.go:117] "RemoveContainer" containerID="245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad" Nov 25 13:29:53 crc kubenswrapper[4675]: E1125 13:29:53.532940 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-5db469f446-85gxv_metallb-system(eb100a90-931c-4daa-8466-49a1ae50185b)\"" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" podUID="eb100a90-931c-4daa-8466-49a1ae50185b" Nov 25 13:29:53 crc kubenswrapper[4675]: I1125 13:29:53.741282 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 13:29:53 crc kubenswrapper[4675]: I1125 13:29:53.741869 4675 scope.go:117] "RemoveContainer" containerID="880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160" Nov 25 13:29:53 crc kubenswrapper[4675]: E1125 13:29:53.742124 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-75cf7cf5cb-gbbjk_openstack-operators(21978291-afd8-477d-9e86-80a465441902)\"" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podUID="21978291-afd8-477d-9e86-80a465441902" Nov 25 13:29:55 crc kubenswrapper[4675]: I1125 13:29:55.547913 4675 scope.go:117] "RemoveContainer" containerID="29efb27fc1df8ec0034eb42ba7cbd198681673401b366e6faf23e304a3438958" Nov 25 13:29:56 crc kubenswrapper[4675]: I1125 13:29:56.532032 4675 scope.go:117] "RemoveContainer" containerID="713b7c11be9183ed3f6f00dede9735971405a82a8e9bdc0b67921617fc67e678" Nov 25 13:29:56 crc kubenswrapper[4675]: I1125 13:29:56.532568 4675 scope.go:117] "RemoveContainer" containerID="e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb" Nov 25 13:29:56 crc kubenswrapper[4675]: E1125 13:29:56.532796 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-6b56b8849f-r6m74_openstack-operators(a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb)\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" podUID="a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb" Nov 25 13:29:56 crc kubenswrapper[4675]: E1125 13:29:56.532805 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=operator pod=rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_openstack-operators(64b432ef-6de9-4d8d-84ce-78f2097bf31e)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" podUID="64b432ef-6de9-4d8d-84ce-78f2097bf31e" Nov 25 13:29:57 crc kubenswrapper[4675]: I1125 13:29:57.034001 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"76b55738-1ee0-41a4-950a-faa08432f67f","Type":"ContainerStarted","Data":"20ff69aea5febbb2e4818ddaec59936a2c4b6f8555672554269ed34bdb40ff46"} Nov 25 13:29:57 crc kubenswrapper[4675]: I1125 13:29:57.034269 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 13:29:57 crc kubenswrapper[4675]: I1125 13:29:57.948904 4675 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 13:29:57 crc kubenswrapper[4675]: I1125 13:29:57.949403 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://7ed8f81e90626eebe0845ed795224d18fe91d4977556f9ac4d2f7fed4650f86e" gracePeriod=5 Nov 25 13:29:59 crc kubenswrapper[4675]: I1125 13:29:59.533687 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:29:59 crc kubenswrapper[4675]: E1125 13:29:59.533959 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.190265 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc"] Nov 25 13:30:00 crc kubenswrapper[4675]: E1125 13:30:00.190950 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" containerName="installer" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.190966 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" containerName="installer" Nov 25 13:30:00 crc kubenswrapper[4675]: E1125 13:30:00.191009 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.191016 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.191195 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="526934b8-a76a-4c95-b2ae-84d5bb58742a" containerName="installer" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.191215 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.191943 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.195523 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.195805 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.204550 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc"] Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.268617 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vm6q\" (UniqueName: \"kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.268655 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.268701 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.371723 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.371763 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vm6q\" (UniqueName: \"kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.371825 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.372598 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume\") pod 
\"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.379884 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.392665 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vm6q\" (UniqueName: \"kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q\") pod \"collect-profiles-29401290-lzbpc\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.518536 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:00 crc kubenswrapper[4675]: I1125 13:30:00.992631 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc"] Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.069056 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" event={"ID":"560fee96-8b76-42bf-9a98-7252dd46590f","Type":"ContainerStarted","Data":"4eeae421a55b0590dadbd555c277888dee1e7c844eb268012e824b00dbf5b77c"} Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.542646 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.543660 4675 scope.go:117] "RemoveContainer" containerID="3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.543893 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-5689899996-24rxr_openstack-operators(ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3)\"" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" podUID="ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.559634 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.560305 4675 scope.go:117] "RemoveContainer" containerID="d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.560578 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-748967c98-4hkh4_openstack-operators(966aefc3-6c87-4e64-b9ae-0c175f4d18a3)\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" podUID="966aefc3-6c87-4e64-b9ae-0c175f4d18a3" Nov 25 13:30:01 crc 
kubenswrapper[4675]: I1125 13:30:01.701496 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.702248 4675 scope.go:117] "RemoveContainer" containerID="cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.702595 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-698d6fd7d6-nkq7r_openstack-operators(223d4b40-6f09-41f5-816d-7e82b45b4b90)\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" podUID="223d4b40-6f09-41f5-816d-7e82b45b4b90" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.782300 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.783637 4675 scope.go:117] "RemoveContainer" containerID="b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.783897 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-7d5d9fd47f-n6gqt_openstack-operators(51b6ef4f-14c9-4c56-b374-3183ccd5cacb)\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" podUID="51b6ef4f-14c9-4c56-b374-3183ccd5cacb" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.837635 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.838772 4675 scope.go:117] "RemoveContainer" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.839067 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-577c5f6d94-svnp9_openstack-operators(18941428-e287-4374-93e0-3209cdbbf7d7)\"" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" podUID="18941428-e287-4374-93e0-3209cdbbf7d7" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.941220 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.941921 4675 scope.go:117] "RemoveContainer" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.942153 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-6bd966bbd4-hzjqx_openstack-operators(986b1a58-05d0-4beb-9199-a7564c809455)\"" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" podUID="986b1a58-05d0-4beb-9199-a7564c809455" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 
13:30:01.943540 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.944408 4675 scope.go:117] "RemoveContainer" containerID="ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.944664 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-6788cc6d75-4cprh_openstack-operators(8d89af10-26a8-4d8b-aedf-8e450df0f28a)\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" podUID="8d89af10-26a8-4d8b-aedf-8e450df0f28a" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.993085 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:30:01 crc kubenswrapper[4675]: I1125 13:30:01.993781 4675 scope.go:117] "RemoveContainer" containerID="a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a" Nov 25 13:30:01 crc kubenswrapper[4675]: E1125 13:30:01.994082 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7d6f5d799-7p97w_openstack-operators(fbd303b9-17db-401e-acbf-1ef8219e36df)\"" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" podUID="fbd303b9-17db-401e-acbf-1ef8219e36df" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.080154 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.080895 4675 scope.go:117] "RemoveContainer" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.081162 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-64d7c556cd-tq6jf_openstack-operators(33456bb6-8430-432c-ac26-1c43307141e3)\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" podUID="33456bb6-8430-432c-ac26-1c43307141e3" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.083165 4675 generic.go:334] "Generic (PLEG): container finished" podID="560fee96-8b76-42bf-9a98-7252dd46590f" containerID="3befad52684d484ff78679d5b91a7f89c5845ecab7077ab0582945206c14d3e5" exitCode=0 Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.083195 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" event={"ID":"560fee96-8b76-42bf-9a98-7252dd46590f","Type":"ContainerDied","Data":"3befad52684d484ff78679d5b91a7f89c5845ecab7077ab0582945206c14d3e5"} Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.147184 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.147963 4675 scope.go:117] "RemoveContainer" 
containerID="82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.148197 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-646fd589f9-jdxms_openstack-operators(e6ff98cd-4075-49dd-b40b-d1923298513e)\"" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" podUID="e6ff98cd-4075-49dd-b40b-d1923298513e" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.232360 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.233216 4675 scope.go:117] "RemoveContainer" containerID="c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.233537 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6b6c55ffd5-84vzh_openstack-operators(a5a68379-3de8-4970-8ca1-ccf52f2d7ad8)\"" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" podUID="a5a68379-3de8-4970-8ca1-ccf52f2d7ad8" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.239351 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.239985 4675 scope.go:117] "RemoveContainer" containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.240238 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-7979c68bc7-m6zl4_openstack-operators(8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1)\"" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" podUID="8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.278238 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.278965 4675 scope.go:117] "RemoveContainer" containerID="3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.279230 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79d658b66d-dls9t_openstack-operators(d4608140-77a4-4067-b58e-a95ae2249fea)\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" podUID="d4608140-77a4-4067-b58e-a95ae2249fea" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.435556 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.436312 4675 scope.go:117] "RemoveContainer" 
containerID="e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.436578 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-5b67cfc8fb-4pmkv_openstack-operators(9495eb50-984d-4069-bd95-719e714b1178)\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" podUID="9495eb50-984d-4069-bd95-719e714b1178" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.510620 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.511365 4675 scope.go:117] "RemoveContainer" containerID="e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.511588 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-58487d9bf4-9rf4d_openstack-operators(88da95fd-fdf9-402d-90d8-e742f92cffbb)\"" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" podUID="88da95fd-fdf9-402d-90d8-e742f92cffbb" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.591501 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.592292 4675 scope.go:117] "RemoveContainer" containerID="74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.592564 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-cc9f5bc5c-lr9bx_openstack-operators(6fa6f393-fc29-4035-81da-a9965421c77f)\"" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" podUID="6fa6f393-fc29-4035-81da-a9965421c77f" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.615558 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.616073 4675 scope.go:117] "RemoveContainer" containerID="eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.616318 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-867d87977b-2z8vf_openstack-operators(e8f46595-6a0c-4b55-9839-3360395606f7)\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" podUID="e8f46595-6a0c-4b55-9839-3360395606f7" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.886895 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 13:30:02 crc kubenswrapper[4675]: I1125 13:30:02.887417 4675 scope.go:117] "RemoveContainer" 
containerID="3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562" Nov 25 13:30:02 crc kubenswrapper[4675]: E1125 13:30:02.887631 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-54485f899-6pvms_openstack-operators(a271eb36-50fc-40c6-8885-f97f281c1150)\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" podUID="a271eb36-50fc-40c6-8885-f97f281c1150" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.094163 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.094214 4675 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="7ed8f81e90626eebe0845ed795224d18fe91d4977556f9ac4d2f7fed4650f86e" exitCode=137 Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.428833 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.529962 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume\") pod \"560fee96-8b76-42bf-9a98-7252dd46590f\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.530038 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vm6q\" (UniqueName: \"kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q\") pod \"560fee96-8b76-42bf-9a98-7252dd46590f\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.530322 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume\") pod \"560fee96-8b76-42bf-9a98-7252dd46590f\" (UID: \"560fee96-8b76-42bf-9a98-7252dd46590f\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.530628 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume" (OuterVolumeSpecName: "config-volume") pod "560fee96-8b76-42bf-9a98-7252dd46590f" (UID: "560fee96-8b76-42bf-9a98-7252dd46590f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.530853 4675 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/560fee96-8b76-42bf-9a98-7252dd46590f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.538596 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q" (OuterVolumeSpecName: "kube-api-access-8vm6q") pod "560fee96-8b76-42bf-9a98-7252dd46590f" (UID: "560fee96-8b76-42bf-9a98-7252dd46590f"). InnerVolumeSpecName "kube-api-access-8vm6q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.539506 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "560fee96-8b76-42bf-9a98-7252dd46590f" (UID: "560fee96-8b76-42bf-9a98-7252dd46590f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.590740 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.590806 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.631443 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.631716 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.631804 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.631558 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.631770 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.632102 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.632044 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.632391 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.632505 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633154 4675 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633261 4675 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633327 4675 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633392 4675 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/560fee96-8b76-42bf-9a98-7252dd46590f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633463 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vm6q\" (UniqueName: \"kubernetes.io/projected/560fee96-8b76-42bf-9a98-7252dd46590f-kube-api-access-8vm6q\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.633531 4675 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.636787 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.735042 4675 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.741208 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 13:30:03 crc kubenswrapper[4675]: I1125 13:30:03.742076 4675 scope.go:117] "RemoveContainer" containerID="880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160" Nov 25 13:30:03 crc kubenswrapper[4675]: E1125 13:30:03.742466 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-75cf7cf5cb-gbbjk_openstack-operators(21978291-afd8-477d-9e86-80a465441902)\"" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" podUID="21978291-afd8-477d-9e86-80a465441902" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.107957 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.108346 4675 scope.go:117] "RemoveContainer" containerID="7ed8f81e90626eebe0845ed795224d18fe91d4977556f9ac4d2f7fed4650f86e" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.108373 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.110678 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" event={"ID":"560fee96-8b76-42bf-9a98-7252dd46590f","Type":"ContainerDied","Data":"4eeae421a55b0590dadbd555c277888dee1e7c844eb268012e824b00dbf5b77c"} Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.110715 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401290-lzbpc" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.110717 4675 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4eeae421a55b0590dadbd555c277888dee1e7c844eb268012e824b00dbf5b77c" Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.492092 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q"] Nov 25 13:30:04 crc kubenswrapper[4675]: I1125 13:30:04.504597 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401245-hmc2q"] Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.544747 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95" path="/var/lib/kubelet/pods/eb1cf8b2-fe5b-466d-859b-5f4cd3c04b95/volumes" Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.546113 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.546316 4675 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.561369 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.561441 4675 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c1d6df97-1bf2-4eef-9360-6d5edc90f260" Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.569889 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 13:30:05 crc kubenswrapper[4675]: I1125 13:30:05.570106 4675 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c1d6df97-1bf2-4eef-9360-6d5edc90f260" Nov 25 13:30:07 crc kubenswrapper[4675]: I1125 13:30:07.266996 4675 scope.go:117] "RemoveContainer" containerID="1ab80084b48bf90bbfacbe39e352a72391ad90eedadfd137a2f858b734c00c1c" Nov 25 13:30:07 crc kubenswrapper[4675]: I1125 13:30:07.532233 4675 scope.go:117] "RemoveContainer" containerID="245b7d332c1a43faba896852533baff7bd4d21da44fd243ff94222d97aecafad" Nov 25 13:30:08 crc kubenswrapper[4675]: I1125 13:30:08.147583 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" event={"ID":"eb100a90-931c-4daa-8466-49a1ae50185b","Type":"ContainerStarted","Data":"ee7c29e55e031afa816d39eebb684ed0cef9da6eab7ba59a68df79a41ed20c45"} Nov 25 13:30:08 crc kubenswrapper[4675]: I1125 13:30:08.147894 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 13:30:09 crc kubenswrapper[4675]: I1125 13:30:09.533024 4675 scope.go:117] "RemoveContainer" containerID="e185ceec38b6578dde35bda5968714bf4f0add63d971c1980972457b1169eddb" Nov 25 13:30:10 crc kubenswrapper[4675]: I1125 13:30:10.185083 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" 
event={"ID":"a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb","Type":"ContainerStarted","Data":"4f8d3c9797bbe3b29f6059a551accbe80feab2f03bfbdeea23133f70c9987270"} Nov 25 13:30:10 crc kubenswrapper[4675]: I1125 13:30:10.185610 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 13:30:10 crc kubenswrapper[4675]: I1125 13:30:10.532176 4675 scope.go:117] "RemoveContainer" containerID="713b7c11be9183ed3f6f00dede9735971405a82a8e9bdc0b67921617fc67e678" Nov 25 13:30:10 crc kubenswrapper[4675]: I1125 13:30:10.648374 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 13:30:11 crc kubenswrapper[4675]: I1125 13:30:11.196556 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-msgjh" event={"ID":"64b432ef-6de9-4d8d-84ce-78f2097bf31e","Type":"ContainerStarted","Data":"7178fcfeb9f7ba35b32cdba37d4262444435698a0664049dd0a76056fdd8ec19"} Nov 25 13:30:12 crc kubenswrapper[4675]: I1125 13:30:12.533245 4675 scope.go:117] "RemoveContainer" containerID="d70ce3c404e1abb1bad1e7d40273d7f0dc1795f1dbcc32697cfe41d186eb93f0" Nov 25 13:30:12 crc kubenswrapper[4675]: I1125 13:30:12.533926 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:30:12 crc kubenswrapper[4675]: E1125 13:30:12.534159 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.217059 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" event={"ID":"966aefc3-6c87-4e64-b9ae-0c175f4d18a3","Type":"ContainerStarted","Data":"2f181c5824c4dd21725629cd76d576c1b10528e7e9333536416a0042b304eeb7"} Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.217257 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.532620 4675 scope.go:117] "RemoveContainer" containerID="a9a499a3f3a1265d4a30325465583fb9c98ba466cb16c4ffb841a6d5aeb5de91" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.532960 4675 scope.go:117] "RemoveContainer" containerID="c336e990ebb6264c6a5780270f1b2fda09c179d307d38b1d8f1a91666caa1fe9" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.533252 4675 scope.go:117] "RemoveContainer" containerID="74b368eae8e49f23bde8dce48738a900c57042bbaf040ec961bd87a2ecf78f34" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.533298 4675 scope.go:117] "RemoveContainer" containerID="82305ee78a29bc5f34b60ec6bafbeb7728b446b6c6409faec87a34f2f68d953e" Nov 25 13:30:13 crc kubenswrapper[4675]: I1125 13:30:13.533489 4675 scope.go:117] "RemoveContainer" containerID="c4905bc4dd6c3931ac300bfd9daa534df8b31a960f67d51955b3a94e431ecf2a" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.228373 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" event={"ID":"6fa6f393-fc29-4035-81da-a9965421c77f","Type":"ContainerStarted","Data":"95f5373c26dde687a7a24a877f8cdfe021e736f2c81e104793fa53940f711d00"} Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.228668 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.232027 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" event={"ID":"e6ff98cd-4075-49dd-b40b-d1923298513e","Type":"ContainerStarted","Data":"5a911615b4d533c32c5804f202992a9724c843b549562c11b77965499adeceb6"} Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.232273 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.234512 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" event={"ID":"33456bb6-8430-432c-ac26-1c43307141e3","Type":"ContainerStarted","Data":"52b860f7cf5634e3b85573aa380509a31669be085b6327887b39cfb53db8de44"} Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.234718 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.237456 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" event={"ID":"986b1a58-05d0-4beb-9199-a7564c809455","Type":"ContainerStarted","Data":"964061e8d43271d6d73696fbb8ab07ce913767b6ad2469063be781fca5699ca4"} Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.237736 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.239538 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" event={"ID":"a5a68379-3de8-4970-8ca1-ccf52f2d7ad8","Type":"ContainerStarted","Data":"01ca707a4ea6c9a34e990412d2be7cabf5df876db88946b7913926b3b88d1b45"} Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.532897 4675 scope.go:117] "RemoveContainer" containerID="b24cf421e30c55a1ed8531c0b8f9e4fa1ba202a4fce3c4c080b08e0358deadd1" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.533568 4675 scope.go:117] "RemoveContainer" containerID="9e76cb28cecd199a3e19dee58b7bffdaf8b9a4a913fb52247d266a3de1e17fbe" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.534057 4675 scope.go:117] "RemoveContainer" containerID="c093334f7755b255b4ed0d77967f3f6e355bc3ba1dd7faae606577cac801c703" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.534146 4675 scope.go:117] "RemoveContainer" containerID="a1e332e511e1a5d3bb512bc6965e3349223155f4dd7b317ef0d1768d9153434a" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.534740 4675 scope.go:117] "RemoveContainer" containerID="3db8a0e25772bd9c207974971eacda2fdabaa1ef67cdc0eb00be88a00bae360c" Nov 25 13:30:14 crc kubenswrapper[4675]: I1125 13:30:14.534835 4675 scope.go:117] "RemoveContainer" containerID="ec27808ea49d34358ab29ca84f1f5f4e6e091571a6067de575c138ae8848b82d" Nov 25 13:30:15 crc 
kubenswrapper[4675]: I1125 13:30:15.250662 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" event={"ID":"ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3","Type":"ContainerStarted","Data":"a12e98c399891bf45090f0df21219e6a8e98b10133b292327ab56d9e18d722ec"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.251590 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.255361 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" event={"ID":"8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1","Type":"ContainerStarted","Data":"599fe47ede27f820c58d7e4ea151e8b6dd555abeb5553df53f5008b5bf398779"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.256302 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.259390 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" event={"ID":"8d89af10-26a8-4d8b-aedf-8e450df0f28a","Type":"ContainerStarted","Data":"59dc38fcc0564d9e936424f7180e3666722af422a69c65da81c5ae1c603c5802"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.259629 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.263596 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" event={"ID":"51b6ef4f-14c9-4c56-b374-3183ccd5cacb","Type":"ContainerStarted","Data":"f4ab674fbe999b0578b954379389d47de03d6db12ce672e2a72798853c251000"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.264508 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.267628 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" event={"ID":"fbd303b9-17db-401e-acbf-1ef8219e36df","Type":"ContainerStarted","Data":"9d904e45a33fd028dbceadaacf9d68e60364bb8637f393c346af0fc547e458d8"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.268496 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.271657 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" event={"ID":"18941428-e287-4374-93e0-3209cdbbf7d7","Type":"ContainerStarted","Data":"5806ce9dffcfa7ebd82d1839ecf53c7a3629c8ac4a34a9f4d955c4bd76d9983a"} Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.272177 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:30:15 crc kubenswrapper[4675]: I1125 13:30:15.540948 4675 scope.go:117] "RemoveContainer" containerID="3cf16ae4e9d2b5860b71b0e4e5e635cc21322710d9745d95a2e8342a08198562" Nov 25 13:30:16 crc 
kubenswrapper[4675]: I1125 13:30:16.281182 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" event={"ID":"a271eb36-50fc-40c6-8885-f97f281c1150","Type":"ContainerStarted","Data":"f28610c46a784fd4b34a96dcc22f6ac3021e6deb06f71548989c5f2488653c5b"} Nov 25 13:30:16 crc kubenswrapper[4675]: I1125 13:30:16.532578 4675 scope.go:117] "RemoveContainer" containerID="3e87e6756fa19f4da2677a2395d77d876e45ce138b3cf3fc7f7525283353294e" Nov 25 13:30:16 crc kubenswrapper[4675]: I1125 13:30:16.533135 4675 scope.go:117] "RemoveContainer" containerID="eb9aff47de6c5752016cc28c464d5a476eeaa7dfb1697cc6f103343bd54b5a2f" Nov 25 13:30:16 crc kubenswrapper[4675]: I1125 13:30:16.534367 4675 scope.go:117] "RemoveContainer" containerID="cf784847706feb1ed3acda7439da7b4b7e05c6ebac80d4b47be74e23514f2636" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.292742 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" event={"ID":"e8f46595-6a0c-4b55-9839-3360395606f7","Type":"ContainerStarted","Data":"f61c4f269fd2ab12a0f0a3069a85c5a23bea53e0b85faf0c7ead14a0b7869f7d"} Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.293407 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.295362 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" event={"ID":"d4608140-77a4-4067-b58e-a95ae2249fea","Type":"ContainerStarted","Data":"6374c7688959ac2c93b9da3a34a650860f523acaf3333a14b24c9bbe24a85006"} Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.295653 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.297866 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" event={"ID":"223d4b40-6f09-41f5-816d-7e82b45b4b90","Type":"ContainerStarted","Data":"62fc4faa1b62c296b281ff6c87d7593cd7c7cf0630d1e679d654bceb98bd6dee"} Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.298171 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.532169 4675 scope.go:117] "RemoveContainer" containerID="e45da40461f4ba6b49a15fd3e1bb480e28c28098a966fc6b39f1f66d595e5680" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.532429 4675 scope.go:117] "RemoveContainer" containerID="e50651a32f30ef5f8c249945a416bca0d6810134c1dc1c5b8c18b3eed8f94673" Nov 25 13:30:17 crc kubenswrapper[4675]: I1125 13:30:17.533256 4675 scope.go:117] "RemoveContainer" containerID="880cb3430d867f62b8dfef417a82ebf9a04c736fbb22c798aa92def74d8c5160" Nov 25 13:30:18 crc kubenswrapper[4675]: I1125 13:30:18.307999 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" event={"ID":"9495eb50-984d-4069-bd95-719e714b1178","Type":"ContainerStarted","Data":"b76ffb6e47d562da27aa51c2d9188c280ce7c788418a38be7072e90ce64b5b8e"} Nov 25 13:30:18 crc kubenswrapper[4675]: I1125 13:30:18.309420 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 13:30:18 crc kubenswrapper[4675]: I1125 13:30:18.310090 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" event={"ID":"21978291-afd8-477d-9e86-80a465441902","Type":"ContainerStarted","Data":"6f9fbaaa82f2bcd910dd436c554dc69edf037e82e4e7b5b92d17a69c88f92afd"} Nov 25 13:30:18 crc kubenswrapper[4675]: I1125 13:30:18.310656 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 13:30:18 crc kubenswrapper[4675]: I1125 13:30:18.312939 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" event={"ID":"88da95fd-fdf9-402d-90d8-e742f92cffbb","Type":"ContainerStarted","Data":"7f41325a049f8cefc708fcbb7b7d86e7935123a9051e4fbaea188b82a94b02a8"} Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.548845 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5689899996-24rxr" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.563641 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-4hkh4" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.706229 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-nkq7r" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.786174 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-n6gqt" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.845185 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-577c5f6d94-svnp9" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.962854 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-4cprh" Nov 25 13:30:21 crc kubenswrapper[4675]: I1125 13:30:21.968214 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-6bd966bbd4-hzjqx" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.017054 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7d6f5d799-7p97w" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.090424 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-tq6jf" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.153544 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-jdxms" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.232449 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.236859 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/neutron-operator-controller-manager-6b6c55ffd5-84vzh" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.242662 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7979c68bc7-m6zl4" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.283415 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dls9t" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.438915 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-4pmkv" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.511492 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.517168 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-58487d9bf4-9rf4d" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.599691 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-cc9f5bc5c-lr9bx" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.628661 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-2z8vf" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.806996 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-r6m74" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.887946 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 13:30:22 crc kubenswrapper[4675]: I1125 13:30:22.889913 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-6pvms" Nov 25 13:30:23 crc kubenswrapper[4675]: I1125 13:30:23.746947 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-75cf7cf5cb-gbbjk" Nov 25 13:30:25 crc kubenswrapper[4675]: I1125 13:30:25.537980 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:30:25 crc kubenswrapper[4675]: E1125 13:30:25.538716 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:30:40 crc kubenswrapper[4675]: I1125 13:30:40.532853 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:30:40 crc kubenswrapper[4675]: E1125 13:30:40.533589 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:30:42 crc kubenswrapper[4675]: I1125 13:30:42.985950 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5db469f446-85gxv" Nov 25 13:30:52 crc kubenswrapper[4675]: I1125 13:30:52.532834 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:30:52 crc kubenswrapper[4675]: E1125 13:30:52.533639 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:31:05 crc kubenswrapper[4675]: I1125 13:31:05.539173 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:31:05 crc kubenswrapper[4675]: E1125 13:31:05.539945 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:31:07 crc kubenswrapper[4675]: I1125 13:31:07.347101 4675 scope.go:117] "RemoveContainer" containerID="5b3d4836ce2117be43de4f8cb5029a566a0cfeb94661a3c5385fb8438467023b" Nov 25 13:31:07 crc kubenswrapper[4675]: I1125 13:31:07.369435 4675 scope.go:117] "RemoveContainer" containerID="fd6441701b1fba33bc4d4b66ecaa7e01f8c51c94d35b39521e3ef7139a15df9c" Nov 25 13:31:07 crc kubenswrapper[4675]: I1125 13:31:07.443321 4675 scope.go:117] "RemoveContainer" containerID="71e92723f6fa92ebfe8bc489215e80a0dbfe904f24497af35dc292963f17dc5f" Nov 25 13:31:19 crc kubenswrapper[4675]: I1125 13:31:19.532024 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:31:19 crc kubenswrapper[4675]: E1125 13:31:19.533116 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:31:34 crc kubenswrapper[4675]: I1125 13:31:34.532726 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:31:34 crc kubenswrapper[4675]: E1125 13:31:34.533465 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:31:45 crc kubenswrapper[4675]: I1125 13:31:45.541636 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:31:45 crc kubenswrapper[4675]: E1125 13:31:45.542612 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.752112 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzvbd/must-gather-rwf9k"] Nov 25 13:31:49 crc kubenswrapper[4675]: E1125 13:31:49.752786 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560fee96-8b76-42bf-9a98-7252dd46590f" containerName="collect-profiles" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.752799 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="560fee96-8b76-42bf-9a98-7252dd46590f" containerName="collect-profiles" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.753033 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="560fee96-8b76-42bf-9a98-7252dd46590f" containerName="collect-profiles" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.754026 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.756880 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bzvbd"/"openshift-service-ca.crt" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.756919 4675 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-bzvbd"/"default-dockercfg-vlbjm" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.756960 4675 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-bzvbd"/"kube-root-ca.crt" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.769338 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bzvbd/must-gather-rwf9k"] Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.910955 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:49 crc kubenswrapper[4675]: I1125 13:31:49.911021 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smpv5\" (UniqueName: \"kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.012961 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.013318 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smpv5\" (UniqueName: \"kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.013549 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.037057 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smpv5\" (UniqueName: \"kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5\") pod \"must-gather-rwf9k\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.072594 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:31:50 crc kubenswrapper[4675]: I1125 13:31:50.804876 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bzvbd/must-gather-rwf9k"] Nov 25 13:31:51 crc kubenswrapper[4675]: I1125 13:31:51.202834 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" event={"ID":"06df0284-68d4-4144-a705-c62f63ef2a32","Type":"ContainerStarted","Data":"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87"} Nov 25 13:31:51 crc kubenswrapper[4675]: I1125 13:31:51.203103 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" event={"ID":"06df0284-68d4-4144-a705-c62f63ef2a32","Type":"ContainerStarted","Data":"2831a6284af9bd7c3df3f8a4a90cf84483f25c1f83e9488682a7b7482f56a0bd"} Nov 25 13:31:52 crc kubenswrapper[4675]: I1125 13:31:52.231148 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" event={"ID":"06df0284-68d4-4144-a705-c62f63ef2a32","Type":"ContainerStarted","Data":"fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f"} Nov 25 13:31:52 crc kubenswrapper[4675]: I1125 13:31:52.255193 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" podStartSLOduration=3.255170833 podStartE2EDuration="3.255170833s" podCreationTimestamp="2025-11-25 13:31:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:31:52.252071256 +0000 UTC m=+3857.423663627" watchObservedRunningTime="2025-11-25 13:31:52.255170833 +0000 UTC m=+3857.426763174" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.555052 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-k2lv9"] Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 
13:31:55.556967 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.631001 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxjp4\" (UniqueName: \"kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.631167 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.732746 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.732899 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxjp4\" (UniqueName: \"kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.732939 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.764252 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxjp4\" (UniqueName: \"kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4\") pod \"crc-debug-k2lv9\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: I1125 13:31:55.875988 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:31:55 crc kubenswrapper[4675]: W1125 13:31:55.912375 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf335a214_354d_4796_a9bd_e3c33541a056.slice/crio-e86d4cf91fd4b723fcd2314e54127a79fc23f34525b28511ad36c3865b63a9ea WatchSource:0}: Error finding container e86d4cf91fd4b723fcd2314e54127a79fc23f34525b28511ad36c3865b63a9ea: Status 404 returned error can't find the container with id e86d4cf91fd4b723fcd2314e54127a79fc23f34525b28511ad36c3865b63a9ea Nov 25 13:31:56 crc kubenswrapper[4675]: I1125 13:31:56.266677 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" event={"ID":"f335a214-354d-4796-a9bd-e3c33541a056","Type":"ContainerStarted","Data":"b346b33775019d2e77463c13a8cf9b66b198a2010a9abffcc1f37c69798176f8"} Nov 25 13:31:56 crc kubenswrapper[4675]: I1125 13:31:56.267235 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" event={"ID":"f335a214-354d-4796-a9bd-e3c33541a056","Type":"ContainerStarted","Data":"e86d4cf91fd4b723fcd2314e54127a79fc23f34525b28511ad36c3865b63a9ea"} Nov 25 13:31:56 crc kubenswrapper[4675]: I1125 13:31:56.285358 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" podStartSLOduration=1.285330609 podStartE2EDuration="1.285330609s" podCreationTimestamp="2025-11-25 13:31:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 13:31:56.280392223 +0000 UTC m=+3861.451984574" watchObservedRunningTime="2025-11-25 13:31:56.285330609 +0000 UTC m=+3861.456922950" Nov 25 13:31:59 crc kubenswrapper[4675]: I1125 13:31:59.532785 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:31:59 crc kubenswrapper[4675]: E1125 13:31:59.533421 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:32:10 crc kubenswrapper[4675]: I1125 13:32:10.532855 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:32:10 crc kubenswrapper[4675]: E1125 13:32:10.533628 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:32:21 crc kubenswrapper[4675]: I1125 13:32:21.533136 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:32:21 crc kubenswrapper[4675]: E1125 13:32:21.534088 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:32:31 crc kubenswrapper[4675]: I1125 13:32:31.635637 4675 generic.go:334] "Generic (PLEG): container finished" podID="f335a214-354d-4796-a9bd-e3c33541a056" containerID="b346b33775019d2e77463c13a8cf9b66b198a2010a9abffcc1f37c69798176f8" exitCode=0 Nov 25 13:32:31 crc kubenswrapper[4675]: I1125 13:32:31.636164 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" event={"ID":"f335a214-354d-4796-a9bd-e3c33541a056","Type":"ContainerDied","Data":"b346b33775019d2e77463c13a8cf9b66b198a2010a9abffcc1f37c69798176f8"} Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.772859 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.820197 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host\") pod \"f335a214-354d-4796-a9bd-e3c33541a056\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.820366 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxjp4\" (UniqueName: \"kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4\") pod \"f335a214-354d-4796-a9bd-e3c33541a056\" (UID: \"f335a214-354d-4796-a9bd-e3c33541a056\") " Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.820495 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host" (OuterVolumeSpecName: "host") pod "f335a214-354d-4796-a9bd-e3c33541a056" (UID: "f335a214-354d-4796-a9bd-e3c33541a056"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.820895 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f335a214-354d-4796-a9bd-e3c33541a056-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.821422 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-k2lv9"] Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.831494 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4" (OuterVolumeSpecName: "kube-api-access-cxjp4") pod "f335a214-354d-4796-a9bd-e3c33541a056" (UID: "f335a214-354d-4796-a9bd-e3c33541a056"). InnerVolumeSpecName "kube-api-access-cxjp4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.836614 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-k2lv9"] Nov 25 13:32:32 crc kubenswrapper[4675]: I1125 13:32:32.923294 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxjp4\" (UniqueName: \"kubernetes.io/projected/f335a214-354d-4796-a9bd-e3c33541a056-kube-api-access-cxjp4\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:33 crc kubenswrapper[4675]: I1125 13:32:33.532336 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:32:33 crc kubenswrapper[4675]: E1125 13:32:33.532601 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:32:33 crc kubenswrapper[4675]: I1125 13:32:33.544197 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f335a214-354d-4796-a9bd-e3c33541a056" path="/var/lib/kubelet/pods/f335a214-354d-4796-a9bd-e3c33541a056/volumes" Nov 25 13:32:33 crc kubenswrapper[4675]: I1125 13:32:33.658903 4675 scope.go:117] "RemoveContainer" containerID="b346b33775019d2e77463c13a8cf9b66b198a2010a9abffcc1f37c69798176f8" Nov 25 13:32:33 crc kubenswrapper[4675]: I1125 13:32:33.658980 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-k2lv9" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.042805 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-gqkxx"] Nov 25 13:32:34 crc kubenswrapper[4675]: E1125 13:32:34.043542 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f335a214-354d-4796-a9bd-e3c33541a056" containerName="container-00" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.043563 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="f335a214-354d-4796-a9bd-e3c33541a056" containerName="container-00" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.044109 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="f335a214-354d-4796-a9bd-e3c33541a056" containerName="container-00" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.045231 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.147781 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhrz5\" (UniqueName: \"kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.148218 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.250258 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.250395 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.250457 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhrz5\" (UniqueName: \"kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.278586 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhrz5\" (UniqueName: \"kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5\") pod \"crc-debug-gqkxx\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.366992 4675 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:34 crc kubenswrapper[4675]: I1125 13:32:34.668234 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" event={"ID":"b458579f-a092-4aba-a91f-269edf95221b","Type":"ContainerStarted","Data":"c76b275e7235765ab97b3674e3f29bf36676c48048d3c1024ed3d067899d6241"} Nov 25 13:32:35 crc kubenswrapper[4675]: I1125 13:32:35.684526 4675 generic.go:334] "Generic (PLEG): container finished" podID="b458579f-a092-4aba-a91f-269edf95221b" containerID="a5600642b549e8b0e2baca9f48b72d06237fc61c53bb34c8db746d8f0cef2261" exitCode=0 Nov 25 13:32:35 crc kubenswrapper[4675]: I1125 13:32:35.684638 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" event={"ID":"b458579f-a092-4aba-a91f-269edf95221b","Type":"ContainerDied","Data":"a5600642b549e8b0e2baca9f48b72d06237fc61c53bb34c8db746d8f0cef2261"} Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.129125 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-gqkxx"] Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.137727 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-gqkxx"] Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.802335 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.914241 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhrz5\" (UniqueName: \"kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5\") pod \"b458579f-a092-4aba-a91f-269edf95221b\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.914602 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host\") pod \"b458579f-a092-4aba-a91f-269edf95221b\" (UID: \"b458579f-a092-4aba-a91f-269edf95221b\") " Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.914729 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host" (OuterVolumeSpecName: "host") pod "b458579f-a092-4aba-a91f-269edf95221b" (UID: "b458579f-a092-4aba-a91f-269edf95221b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.915448 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b458579f-a092-4aba-a91f-269edf95221b-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:36 crc kubenswrapper[4675]: I1125 13:32:36.928078 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5" (OuterVolumeSpecName: "kube-api-access-jhrz5") pod "b458579f-a092-4aba-a91f-269edf95221b" (UID: "b458579f-a092-4aba-a91f-269edf95221b"). InnerVolumeSpecName "kube-api-access-jhrz5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.017085 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhrz5\" (UniqueName: \"kubernetes.io/projected/b458579f-a092-4aba-a91f-269edf95221b-kube-api-access-jhrz5\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.312464 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-2dzj6"] Nov 25 13:32:37 crc kubenswrapper[4675]: E1125 13:32:37.312899 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b458579f-a092-4aba-a91f-269edf95221b" containerName="container-00" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.312916 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="b458579f-a092-4aba-a91f-269edf95221b" containerName="container-00" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.313126 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="b458579f-a092-4aba-a91f-269edf95221b" containerName="container-00" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.313705 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.424984 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.425198 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwvdm\" (UniqueName: \"kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.526612 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.526761 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwvdm\" (UniqueName: \"kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.526780 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.542766 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b458579f-a092-4aba-a91f-269edf95221b" path="/var/lib/kubelet/pods/b458579f-a092-4aba-a91f-269edf95221b/volumes" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.555492 4675 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zwvdm\" (UniqueName: \"kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm\") pod \"crc-debug-2dzj6\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.631060 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.704342 4675 scope.go:117] "RemoveContainer" containerID="a5600642b549e8b0e2baca9f48b72d06237fc61c53bb34c8db746d8f0cef2261" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.704350 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-gqkxx" Nov 25 13:32:37 crc kubenswrapper[4675]: I1125 13:32:37.707373 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" event={"ID":"0433869a-a6f0-49ff-b266-1c6893877d3a","Type":"ContainerStarted","Data":"e9092dba5d4d16e5299085b13570432f9f6173516d6e17f26d7cbab7733bc0c0"} Nov 25 13:32:38 crc kubenswrapper[4675]: I1125 13:32:38.723200 4675 generic.go:334] "Generic (PLEG): container finished" podID="0433869a-a6f0-49ff-b266-1c6893877d3a" containerID="c595540f6c0e629cbefa4e1792adfb9a9f94f8d89a9695fd24939e447200de51" exitCode=0 Nov 25 13:32:38 crc kubenswrapper[4675]: I1125 13:32:38.723290 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" event={"ID":"0433869a-a6f0-49ff-b266-1c6893877d3a","Type":"ContainerDied","Data":"c595540f6c0e629cbefa4e1792adfb9a9f94f8d89a9695fd24939e447200de51"} Nov 25 13:32:38 crc kubenswrapper[4675]: I1125 13:32:38.768294 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-2dzj6"] Nov 25 13:32:38 crc kubenswrapper[4675]: I1125 13:32:38.777801 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzvbd/crc-debug-2dzj6"] Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.844844 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.969020 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host\") pod \"0433869a-a6f0-49ff-b266-1c6893877d3a\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.969194 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host" (OuterVolumeSpecName: "host") pod "0433869a-a6f0-49ff-b266-1c6893877d3a" (UID: "0433869a-a6f0-49ff-b266-1c6893877d3a"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.969131 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwvdm\" (UniqueName: \"kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm\") pod \"0433869a-a6f0-49ff-b266-1c6893877d3a\" (UID: \"0433869a-a6f0-49ff-b266-1c6893877d3a\") " Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.969785 4675 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0433869a-a6f0-49ff-b266-1c6893877d3a-host\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:39 crc kubenswrapper[4675]: I1125 13:32:39.981036 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm" (OuterVolumeSpecName: "kube-api-access-zwvdm") pod "0433869a-a6f0-49ff-b266-1c6893877d3a" (UID: "0433869a-a6f0-49ff-b266-1c6893877d3a"). InnerVolumeSpecName "kube-api-access-zwvdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:32:40 crc kubenswrapper[4675]: I1125 13:32:40.071102 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwvdm\" (UniqueName: \"kubernetes.io/projected/0433869a-a6f0-49ff-b266-1c6893877d3a-kube-api-access-zwvdm\") on node \"crc\" DevicePath \"\"" Nov 25 13:32:40 crc kubenswrapper[4675]: I1125 13:32:40.743562 4675 scope.go:117] "RemoveContainer" containerID="c595540f6c0e629cbefa4e1792adfb9a9f94f8d89a9695fd24939e447200de51" Nov 25 13:32:40 crc kubenswrapper[4675]: I1125 13:32:40.743617 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/crc-debug-2dzj6" Nov 25 13:32:41 crc kubenswrapper[4675]: I1125 13:32:41.544585 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0433869a-a6f0-49ff-b266-1c6893877d3a" path="/var/lib/kubelet/pods/0433869a-a6f0-49ff-b266-1c6893877d3a/volumes" Nov 25 13:32:48 crc kubenswrapper[4675]: I1125 13:32:48.532411 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:32:48 crc kubenswrapper[4675]: E1125 13:32:48.533060 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:33:03 crc kubenswrapper[4675]: I1125 13:33:03.532043 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:33:03 crc kubenswrapper[4675]: E1125 13:33:03.532974 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:33:06 crc kubenswrapper[4675]: I1125 13:33:06.683873 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-api-7ff8694d7d-s9pzw_51325415-d3b2-4852-bdce-6861cd1dc391/barbican-api/0.log" Nov 25 13:33:06 crc kubenswrapper[4675]: I1125 13:33:06.963133 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-858b4645fd-rr59w_a4640f4e-98fe-438c-bc12-38c11c62f997/barbican-keystone-listener/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.184238 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-858b4645fd-rr59w_a4640f4e-98fe-438c-bc12-38c11c62f997/barbican-keystone-listener-log/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.322048 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f59d94c69-6bgtx_654bfacb-d4b4-45a3-ae90-92496e1b5e9e/barbican-worker/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.523048 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f59d94c69-6bgtx_654bfacb-d4b4-45a3-ae90-92496e1b5e9e/barbican-worker-log/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.699785 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4jk2f_3f838da8-c090-474d-826e-592b92857777/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.749706 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ff8694d7d-s9pzw_51325415-d3b2-4852-bdce-6861cd1dc391/barbican-api-log/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.795942 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/ceilometer-central-agent/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.929217 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/ceilometer-notification-agent/0.log" Nov 25 13:33:07 crc kubenswrapper[4675]: I1125 13:33:07.982524 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/proxy-httpd/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.042514 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a603c80e-1d69-41a1-99fb-dfde13a182d6/sg-core/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.193678 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_90a143ed-4c09-4ac7-8dd9-869c15e9ef3c/cinder-api/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.262235 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_90a143ed-4c09-4ac7-8dd9-869c15e9ef3c/cinder-api-log/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.411859 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46c62718-04bb-45f3-bd9c-08957f1241a7/cinder-scheduler/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.456208 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46c62718-04bb-45f3-bd9c-08957f1241a7/probe/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.608618 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-sd6xb_17c7ad50-1a29-478e-b1df-a0084c3142df/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.791002 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ndtm5_3d6b5b05-8b7a-4ff2-8972-0f07e3bb9850/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:08 crc kubenswrapper[4675]: I1125 13:33:08.959996 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/init/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.064475 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/init/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.135852 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-6stz8_25fb1275-2632-4271-b41e-909adabbdf27/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.183266 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-r9wsm_0e609a3d-0025-458f-8086-595a9923a23d/dnsmasq-dns/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.595060 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5d10980d-6f3c-4a3f-a4ce-30e07d985393/glance-httpd/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.609033 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5d10980d-6f3c-4a3f-a4ce-30e07d985393/glance-log/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.791535 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d478f76e-2629-426c-8a29-60b4cce437f2/glance-httpd/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.823762 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d478f76e-2629-426c-8a29-60b4cce437f2/glance-log/0.log" Nov 25 13:33:09 crc kubenswrapper[4675]: I1125 13:33:09.943280 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon/2.log" Nov 25 13:33:10 crc kubenswrapper[4675]: I1125 13:33:10.166159 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon/1.log" Nov 25 13:33:10 crc kubenswrapper[4675]: I1125 13:33:10.241916 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-c46tf_b04ef6a2-5500-4d9d-87ad-1ec2762a5a46/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:10 crc kubenswrapper[4675]: I1125 13:33:10.445517 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-85d4f84f96-fcncp_412d2040-4c83-4443-989e-cc844466e840/horizon-log/0.log" Nov 25 13:33:10 crc kubenswrapper[4675]: I1125 13:33:10.518302 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-vgqfd_21e88661-854c-481d-b024-c7c87ea9373a/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:10 crc 
kubenswrapper[4675]: I1125 13:33:10.862317 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-686987849d-794s5_ef21ec22-c1e4-490d-b59c-8ffec71be972/keystone-api/0.log" Nov 25 13:33:10 crc kubenswrapper[4675]: I1125 13:33:10.923552 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401261-blc64_b1779f11-7333-4094-a1ee-b509cc09da52/keystone-cron/0.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.069519 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_76b55738-1ee0-41a4-950a-faa08432f67f/kube-state-metrics/3.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.162580 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-q7kgr_c466bd75-6cb4-452f-a4fa-d9a5dbec6840/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.164531 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_76b55738-1ee0-41a4-950a-faa08432f67f/kube-state-metrics/2.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.664024 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6c6c8d8969-kqpxz_a69ab647-c53a-4fcc-86f0-d92a9eebf587/neutron-httpd/0.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.691885 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6c6c8d8969-kqpxz_a69ab647-c53a-4fcc-86f0-d92a9eebf587/neutron-api/0.log" Nov 25 13:33:11 crc kubenswrapper[4675]: I1125 13:33:11.803109 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-w5gtg_4ee951ab-e497-4274-9251-85c92c498b0e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:12 crc kubenswrapper[4675]: I1125 13:33:12.133049 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cf087269-8e7f-416e-9492-b3ccb72f40d0/nova-api-log/0.log" Nov 25 13:33:12 crc kubenswrapper[4675]: I1125 13:33:12.517602 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_98ffb1d8-055d-41be-8c54-7282e6e1c36d/nova-cell0-conductor-conductor/0.log" Nov 25 13:33:12 crc kubenswrapper[4675]: I1125 13:33:12.585517 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cf087269-8e7f-416e-9492-b3ccb72f40d0/nova-api-api/0.log" Nov 25 13:33:12 crc kubenswrapper[4675]: I1125 13:33:12.607506 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_02b15b4e-fe03-46fb-9a34-c4e496129490/nova-cell1-conductor-conductor/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.074567 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-l9q4l_4c8ba7b5-22cd-44f4-9389-1f352f9a2368/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.170238 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_10485665-29b9-4a6f-ac17-3cca271b761d/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.195020 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_f3cab755-3df5-4cfb-880e-f842da175aeb/nova-metadata-log/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.635309 4675 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/mysql-bootstrap/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.727036 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_431204a3-00f5-425a-b473-86f86e2bc600/nova-scheduler-scheduler/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.872475 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/galera/0.log" Nov 25 13:33:13 crc kubenswrapper[4675]: I1125 13:33:13.875655 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c8756ef3-0fbe-457a-93ed-957baf6a60da/mysql-bootstrap/0.log" Nov 25 13:33:14 crc kubenswrapper[4675]: I1125 13:33:14.144273 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/mysql-bootstrap/0.log" Nov 25 13:33:14 crc kubenswrapper[4675]: I1125 13:33:14.369144 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/mysql-bootstrap/0.log" Nov 25 13:33:14 crc kubenswrapper[4675]: I1125 13:33:14.506090 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3d992e9b-ee07-4194-90de-02816b3aec1e/galera/0.log" Nov 25 13:33:14 crc kubenswrapper[4675]: I1125 13:33:14.608162 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_f3cab755-3df5-4cfb-880e-f842da175aeb/nova-metadata-metadata/0.log" Nov 25 13:33:14 crc kubenswrapper[4675]: I1125 13:33:14.638371 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_4305dd31-2399-4e02-8b99-224a616e8c8c/openstackclient/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.012193 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-68qlk_6ef75e87-29e7-4d11-9547-430df2247d7b/openstack-network-exporter/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.014157 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server-init/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.266035 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovs-vswitchd/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.303242 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.303761 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zx9pg_738ca4c6-0239-497f-aa30-001f7a06bf41/ovsdb-server-init/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.544377 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:33:15 crc kubenswrapper[4675]: E1125 13:33:15.544578 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.604202 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-vgv4n_bd0994da-34e6-4f4c-b8a5-cae4c7923df7/ovn-controller/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.678242 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vq5tr_5f3381e9-49d9-47ea-87dd-86442bf3394a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.947510 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d2d15dcc-e29e-4b04-8de0-911cc8190e33/openstack-network-exporter/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.964493 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d2d15dcc-e29e-4b04-8de0-911cc8190e33/ovn-northd/0.log" Nov 25 13:33:15 crc kubenswrapper[4675]: I1125 13:33:15.987616 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c215d8eb-d320-4245-8bdb-73b0d600ea49/openstack-network-exporter/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.242400 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c215d8eb-d320-4245-8bdb-73b0d600ea49/ovsdbserver-nb/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.324796 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f36454cf-1208-4320-8a1d-8df0afad3983/openstack-network-exporter/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.411163 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f36454cf-1208-4320-8a1d-8df0afad3983/ovsdbserver-sb/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.656505 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf4848886-8rwx5_1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34/placement-log/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.723212 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf4848886-8rwx5_1246b1f0-1ae2-49eb-8100-8fd8c4cb6b34/placement-api/0.log" Nov 25 13:33:16 crc kubenswrapper[4675]: I1125 13:33:16.792936 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/setup-container/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.004190 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/setup-container/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.090272 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_13f3fc9e-df33-4016-8d7e-a40112cdc27f/rabbitmq/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.182065 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/setup-container/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.446227 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/rabbitmq/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.495780 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_aa5e2576-e3fb-44a7-83ad-6193b6437ae0/setup-container/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.587340 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-8z4s5_13de1b17-7309-4325-9264-52182799c3be/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.768877 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-gcz54_876a0f8c-9396-49fe-b1b8-5c44e691a7c9/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:17 crc kubenswrapper[4675]: I1125 13:33:17.907879 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-rkp9z_25753af6-5930-488b-8ffc-8b905d803063/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.248437 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-87t9t_58949bc5-8d5f-4d04-bf70-eb2e0a55cda8/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.395535 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mkh7c_0808241a-edce-45b6-ae18-7b0356549cf6/ssh-known-hosts-edpm-deployment/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.578887 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-675f685b59-np48s_08133520-c4c6-4b59-b426-d18290e4195a/proxy-server/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.615562 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-675f685b59-np48s_08133520-c4c6-4b59-b426-d18290e4195a/proxy-httpd/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.796395 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-g7rkg_959f7b20-344e-4759-8142-19a41f250c72/swift-ring-rebalance/0.log" Nov 25 13:33:18 crc kubenswrapper[4675]: I1125 13:33:18.932685 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-auditor/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.041904 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-replicator/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.064100 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-reaper/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.199906 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/account-server/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.242269 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-auditor/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.339733 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-server/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.342984 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-replicator/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.500108 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/container-updater/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.564520 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-auditor/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.627778 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-expirer/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.660237 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-replicator/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.789050 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-updater/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.805493 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/object-server/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.930364 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/rsync/0.log" Nov 25 13:33:19 crc kubenswrapper[4675]: I1125 13:33:19.935637 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_60249dd6-be73-49eb-861a-54bb77652335/swift-recon-cron/0.log" Nov 25 13:33:20 crc kubenswrapper[4675]: I1125 13:33:20.177945 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-tkrnj_3ca0e52a-979d-4834-a03d-135355de72db/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:20 crc kubenswrapper[4675]: I1125 13:33:20.342003 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_2beee9f5-8487-4f64-a55c-11f32c68c5fc/tempest-tests-tempest-tests-runner/0.log" Nov 25 13:33:20 crc kubenswrapper[4675]: I1125 13:33:20.496268 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_ac759df6-989d-47da-9259-b6d00e9e566e/test-operator-logs-container/0.log" Nov 25 13:33:20 crc kubenswrapper[4675]: I1125 13:33:20.674409 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-ncsz9_92805c79-2eb0-4562-9aed-1a7c7b88a5aa/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 13:33:30 crc kubenswrapper[4675]: I1125 13:33:30.532766 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:33:30 crc kubenswrapper[4675]: E1125 13:33:30.534845 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 
25 13:33:31 crc kubenswrapper[4675]: I1125 13:33:31.674118 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6efda04f-52f8-48b1-9afd-f606c3a72d50/memcached/0.log" Nov 25 13:33:45 crc kubenswrapper[4675]: I1125 13:33:45.556319 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:33:46 crc kubenswrapper[4675]: I1125 13:33:46.388069 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65"} Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.248004 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.389054 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.452761 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.457023 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.630333 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/util/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.631183 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/pull/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.725407 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_418b8c45349733071182c1309fe6dc88d5684c19ac3fd929fe39207b208tqqw_2a55ae9f-fb9d-4e3b-bc4a-1c715a24ee82/extract/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.835395 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5689899996-24rxr_ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3/manager/3.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.900385 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5689899996-24rxr_ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3/kube-rbac-proxy/0.log" Nov 25 13:33:50 crc kubenswrapper[4675]: I1125 13:33:50.958630 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5689899996-24rxr_ad1d87d3-79d6-43d9-adc5-0ae5b52fa6e3/manager/2.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.134071 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-4hkh4_966aefc3-6c87-4e64-b9ae-0c175f4d18a3/kube-rbac-proxy/0.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.168720 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-4hkh4_966aefc3-6c87-4e64-b9ae-0c175f4d18a3/manager/3.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.200759 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-4hkh4_966aefc3-6c87-4e64-b9ae-0c175f4d18a3/manager/2.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.340232 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-4cprh_8d89af10-26a8-4d8b-aedf-8e450df0f28a/kube-rbac-proxy/0.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.400597 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-4cprh_8d89af10-26a8-4d8b-aedf-8e450df0f28a/manager/2.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.450091 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-4cprh_8d89af10-26a8-4d8b-aedf-8e450df0f28a/manager/3.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.616599 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6bd966bbd4-hzjqx_986b1a58-05d0-4beb-9199-a7564c809455/manager/3.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.632399 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6bd966bbd4-hzjqx_986b1a58-05d0-4beb-9199-a7564c809455/kube-rbac-proxy/0.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.653804 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6bd966bbd4-hzjqx_986b1a58-05d0-4beb-9199-a7564c809455/manager/2.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.812774 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-nkq7r_223d4b40-6f09-41f5-816d-7e82b45b4b90/kube-rbac-proxy/0.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.858780 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-nkq7r_223d4b40-6f09-41f5-816d-7e82b45b4b90/manager/3.log" Nov 25 13:33:51 crc kubenswrapper[4675]: I1125 13:33:51.914519 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-nkq7r_223d4b40-6f09-41f5-816d-7e82b45b4b90/manager/2.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.070920 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-n6gqt_51b6ef4f-14c9-4c56-b374-3183ccd5cacb/kube-rbac-proxy/0.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.091037 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-n6gqt_51b6ef4f-14c9-4c56-b374-3183ccd5cacb/manager/3.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.170254 4675 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-n6gqt_51b6ef4f-14c9-4c56-b374-3183ccd5cacb/manager/2.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.237116 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-577c5f6d94-svnp9_18941428-e287-4374-93e0-3209cdbbf7d7/kube-rbac-proxy/0.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.294739 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-577c5f6d94-svnp9_18941428-e287-4374-93e0-3209cdbbf7d7/manager/3.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.800975 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-577c5f6d94-svnp9_18941428-e287-4374-93e0-3209cdbbf7d7/manager/2.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.849574 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-6pvms_a271eb36-50fc-40c6-8885-f97f281c1150/kube-rbac-proxy/0.log" Nov 25 13:33:52 crc kubenswrapper[4675]: I1125 13:33:52.912543 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-6pvms_a271eb36-50fc-40c6-8885-f97f281c1150/manager/3.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.034012 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-6pvms_a271eb36-50fc-40c6-8885-f97f281c1150/manager/2.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.128169 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7d6f5d799-7p97w_fbd303b9-17db-401e-acbf-1ef8219e36df/kube-rbac-proxy/0.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.134041 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7d6f5d799-7p97w_fbd303b9-17db-401e-acbf-1ef8219e36df/manager/3.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.270017 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7d6f5d799-7p97w_fbd303b9-17db-401e-acbf-1ef8219e36df/manager/2.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.377897 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-jdxms_e6ff98cd-4075-49dd-b40b-d1923298513e/kube-rbac-proxy/0.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.379654 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-jdxms_e6ff98cd-4075-49dd-b40b-d1923298513e/manager/3.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.513175 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-jdxms_e6ff98cd-4075-49dd-b40b-d1923298513e/manager/2.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.586409 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-tq6jf_33456bb6-8430-432c-ac26-1c43307141e3/kube-rbac-proxy/0.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.629687 4675 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-tq6jf_33456bb6-8430-432c-ac26-1c43307141e3/manager/3.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.790618 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-tq6jf_33456bb6-8430-432c-ac26-1c43307141e3/manager/2.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.859890 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6b6c55ffd5-84vzh_a5a68379-3de8-4970-8ca1-ccf52f2d7ad8/kube-rbac-proxy/0.log" Nov 25 13:33:53 crc kubenswrapper[4675]: I1125 13:33:53.890069 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6b6c55ffd5-84vzh_a5a68379-3de8-4970-8ca1-ccf52f2d7ad8/manager/3.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.026900 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6b6c55ffd5-84vzh_a5a68379-3de8-4970-8ca1-ccf52f2d7ad8/manager/2.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.115951 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dls9t_d4608140-77a4-4067-b58e-a95ae2249fea/kube-rbac-proxy/0.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.151670 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dls9t_d4608140-77a4-4067-b58e-a95ae2249fea/manager/3.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.763950 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7979c68bc7-m6zl4_8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1/manager/3.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.778248 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dls9t_d4608140-77a4-4067-b58e-a95ae2249fea/manager/2.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.783790 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7979c68bc7-m6zl4_8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1/manager/2.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.809861 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7979c68bc7-m6zl4_8abddd77-b0e2-4fd7-bdf9-76f4f9f76fd1/kube-rbac-proxy/0.log" Nov 25 13:33:54 crc kubenswrapper[4675]: I1125 13:33:54.990243 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-xxcn9_bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48/kube-rbac-proxy/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.010088 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-xxcn9_bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48/manager/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.034351 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-xxcn9_bcc7bd3d-10ec-47a1-81b2-aa08d2d46c48/manager/1.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.181778 4675 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-75cf7cf5cb-gbbjk_21978291-afd8-477d-9e86-80a465441902/kube-rbac-proxy/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.230933 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-75cf7cf5cb-gbbjk_21978291-afd8-477d-9e86-80a465441902/manager/3.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.273019 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-75cf7cf5cb-gbbjk_21978291-afd8-477d-9e86-80a465441902/manager/2.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.363159 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b99896c6-rz556_01418b22-5bf7-4486-bc9c-fe8d6d757b3d/kube-rbac-proxy/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.423010 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b99896c6-rz556_01418b22-5bf7-4486-bc9c-fe8d6d757b3d/operator/1.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.611361 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-9qsg7_c3e36545-46f5-4907-84b0-93ed29882b8c/registry-server/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.612428 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b99896c6-rz556_01418b22-5bf7-4486-bc9c-fe8d6d757b3d/operator/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.865842 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-4pmkv_9495eb50-984d-4069-bd95-719e714b1178/kube-rbac-proxy/0.log" Nov 25 13:33:55 crc kubenswrapper[4675]: I1125 13:33:55.961920 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-4pmkv_9495eb50-984d-4069-bd95-719e714b1178/manager/3.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.012241 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-4pmkv_9495eb50-984d-4069-bd95-719e714b1178/manager/2.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.022366 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-2z8vf_e8f46595-6a0c-4b55-9839-3360395606f7/kube-rbac-proxy/0.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.117290 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-2z8vf_e8f46595-6a0c-4b55-9839-3360395606f7/manager/3.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.190863 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-2z8vf_e8f46595-6a0c-4b55-9839-3360395606f7/manager/2.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.242381 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_64b432ef-6de9-4d8d-84ce-78f2097bf31e/operator/2.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.272359 
4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-msgjh_64b432ef-6de9-4d8d-84ce-78f2097bf31e/operator/3.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.474942 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-cc9f5bc5c-lr9bx_6fa6f393-fc29-4035-81da-a9965421c77f/manager/3.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.503370 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-cc9f5bc5c-lr9bx_6fa6f393-fc29-4035-81da-a9965421c77f/kube-rbac-proxy/0.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.530448 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-cc9f5bc5c-lr9bx_6fa6f393-fc29-4035-81da-a9965421c77f/manager/2.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.594335 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58487d9bf4-9rf4d_88da95fd-fdf9-402d-90d8-e742f92cffbb/kube-rbac-proxy/0.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.712167 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58487d9bf4-9rf4d_88da95fd-fdf9-402d-90d8-e742f92cffbb/manager/3.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.728626 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58487d9bf4-9rf4d_88da95fd-fdf9-402d-90d8-e742f92cffbb/manager/2.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.729178 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-rkgfz_2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12/kube-rbac-proxy/0.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.828659 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-rkgfz_2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12/manager/1.log" Nov 25 13:33:56 crc kubenswrapper[4675]: I1125 13:33:56.930354 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-rkgfz_2a4ada9d-e71c-4bf7-84a2-9276bcb9fd12/manager/0.log" Nov 25 13:33:57 crc kubenswrapper[4675]: I1125 13:33:57.019542 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-r6m74_a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb/kube-rbac-proxy/0.log" Nov 25 13:33:57 crc kubenswrapper[4675]: I1125 13:33:57.043886 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-r6m74_a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb/manager/2.log" Nov 25 13:33:57 crc kubenswrapper[4675]: I1125 13:33:57.071459 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-r6m74_a135d7c8-ecde-4fe8-8eed-fc0d8c8a23bb/manager/3.log" Nov 25 13:34:14 crc kubenswrapper[4675]: I1125 13:34:14.721858 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-9ss4k_292203b1-555c-4331-90c7-f3a56ee042ba/control-plane-machine-set-operator/0.log" Nov 25 13:34:14 crc kubenswrapper[4675]: 
I1125 13:34:14.881098 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wh2qp_962d971d-f0de-4d22-a854-e4a65644b9b8/kube-rbac-proxy/0.log" Nov 25 13:34:14 crc kubenswrapper[4675]: I1125 13:34:14.925660 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wh2qp_962d971d-f0de-4d22-a854-e4a65644b9b8/machine-api-operator/0.log" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.268852 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:25 crc kubenswrapper[4675]: E1125 13:34:25.269747 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0433869a-a6f0-49ff-b266-1c6893877d3a" containerName="container-00" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.269759 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="0433869a-a6f0-49ff-b266-1c6893877d3a" containerName="container-00" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.269970 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="0433869a-a6f0-49ff-b266-1c6893877d3a" containerName="container-00" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.271297 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.330072 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.335243 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqrs7\" (UniqueName: \"kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.335395 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.335675 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.437326 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqrs7\" (UniqueName: \"kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.437578 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities\") pod \"community-operators-fxn84\" (UID: 
\"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.437639 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.438099 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.438116 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.460852 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqrs7\" (UniqueName: \"kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7\") pod \"community-operators-fxn84\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:25 crc kubenswrapper[4675]: I1125 13:34:25.598154 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:26 crc kubenswrapper[4675]: I1125 13:34:26.233622 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:26 crc kubenswrapper[4675]: I1125 13:34:26.852175 4675 generic.go:334] "Generic (PLEG): container finished" podID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerID="92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98" exitCode=0 Nov 25 13:34:26 crc kubenswrapper[4675]: I1125 13:34:26.852372 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerDied","Data":"92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98"} Nov 25 13:34:26 crc kubenswrapper[4675]: I1125 13:34:26.853642 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerStarted","Data":"950b1aef28d672c13e8623f0fa623e6220a4fdfe2f394b9e7eb48a7f1eee9a13"} Nov 25 13:34:27 crc kubenswrapper[4675]: I1125 13:34:27.864468 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerStarted","Data":"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476"} Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.277920 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-6lknr_ff650df4-ed32-43ee-99cf-25ea4d4b55d8/cert-manager-controller/1.log" Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.423998 
4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-6lknr_ff650df4-ed32-43ee-99cf-25ea4d4b55d8/cert-manager-controller/0.log" Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.523083 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mvcg9_9624922e-a281-4931-97a5-47ae5c1e78f4/cert-manager-cainjector/1.log" Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.574316 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mvcg9_9624922e-a281-4931-97a5-47ae5c1e78f4/cert-manager-cainjector/0.log" Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.702338 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-j2mvp_23b012ff-b039-47d5-84d0-276ef8ab953b/cert-manager-webhook/0.log" Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.878166 4675 generic.go:334] "Generic (PLEG): container finished" podID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerID="0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476" exitCode=0 Nov 25 13:34:28 crc kubenswrapper[4675]: I1125 13:34:28.878206 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerDied","Data":"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476"} Nov 25 13:34:29 crc kubenswrapper[4675]: I1125 13:34:29.889354 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerStarted","Data":"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33"} Nov 25 13:34:29 crc kubenswrapper[4675]: I1125 13:34:29.910880 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fxn84" podStartSLOduration=2.47233339 podStartE2EDuration="4.910862998s" podCreationTimestamp="2025-11-25 13:34:25 +0000 UTC" firstStartedPulling="2025-11-25 13:34:26.854744511 +0000 UTC m=+4012.026336852" lastFinishedPulling="2025-11-25 13:34:29.293274119 +0000 UTC m=+4014.464866460" observedRunningTime="2025-11-25 13:34:29.907448991 +0000 UTC m=+4015.079041332" watchObservedRunningTime="2025-11-25 13:34:29.910862998 +0000 UTC m=+4015.082455339" Nov 25 13:34:35 crc kubenswrapper[4675]: I1125 13:34:35.599111 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:35 crc kubenswrapper[4675]: I1125 13:34:35.601182 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:35 crc kubenswrapper[4675]: I1125 13:34:35.647023 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:35 crc kubenswrapper[4675]: I1125 13:34:35.990998 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:36 crc kubenswrapper[4675]: I1125 13:34:36.060197 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:37 crc kubenswrapper[4675]: I1125 13:34:37.955577 4675 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-fxn84" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="registry-server" containerID="cri-o://ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33" gracePeriod=2 Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.461075 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.600420 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content\") pod \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.601077 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities\") pod \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.601123 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqrs7\" (UniqueName: \"kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7\") pod \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\" (UID: \"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12\") " Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.601648 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities" (OuterVolumeSpecName: "utilities") pod "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" (UID: "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.603214 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.611020 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7" (OuterVolumeSpecName: "kube-api-access-xqrs7") pod "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" (UID: "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12"). InnerVolumeSpecName "kube-api-access-xqrs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.661939 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" (UID: "76e2d0d3-7e67-4467-a723-e6b3bb7f8e12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.705201 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.705238 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqrs7\" (UniqueName: \"kubernetes.io/projected/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12-kube-api-access-xqrs7\") on node \"crc\" DevicePath \"\"" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.966263 4675 generic.go:334] "Generic (PLEG): container finished" podID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerID="ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33" exitCode=0 Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.966305 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerDied","Data":"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33"} Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.966326 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxn84" Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.966352 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxn84" event={"ID":"76e2d0d3-7e67-4467-a723-e6b3bb7f8e12","Type":"ContainerDied","Data":"950b1aef28d672c13e8623f0fa623e6220a4fdfe2f394b9e7eb48a7f1eee9a13"} Nov 25 13:34:38 crc kubenswrapper[4675]: I1125 13:34:38.966371 4675 scope.go:117] "RemoveContainer" containerID="ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.005299 4675 scope.go:117] "RemoveContainer" containerID="0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.007355 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.021390 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fxn84"] Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.035452 4675 scope.go:117] "RemoveContainer" containerID="92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.082463 4675 scope.go:117] "RemoveContainer" containerID="ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33" Nov 25 13:34:39 crc kubenswrapper[4675]: E1125 13:34:39.083032 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33\": container with ID starting with ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33 not found: ID does not exist" containerID="ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.083078 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33"} err="failed to get container status 
\"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33\": rpc error: code = NotFound desc = could not find container \"ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33\": container with ID starting with ea2619e755fb9970aebc9e3b3db7c1335ef804a6a6cbda52c39e2d63916b5a33 not found: ID does not exist" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.083106 4675 scope.go:117] "RemoveContainer" containerID="0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476" Nov 25 13:34:39 crc kubenswrapper[4675]: E1125 13:34:39.083453 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476\": container with ID starting with 0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476 not found: ID does not exist" containerID="0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.083520 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476"} err="failed to get container status \"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476\": rpc error: code = NotFound desc = could not find container \"0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476\": container with ID starting with 0cf990a24a5b041d030012a14c86cb3987c796d8472338c2849aaf4566c53476 not found: ID does not exist" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.083555 4675 scope.go:117] "RemoveContainer" containerID="92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98" Nov 25 13:34:39 crc kubenswrapper[4675]: E1125 13:34:39.083853 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98\": container with ID starting with 92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98 not found: ID does not exist" containerID="92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.083885 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98"} err="failed to get container status \"92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98\": rpc error: code = NotFound desc = could not find container \"92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98\": container with ID starting with 92b59cae5a4eadda4f85ac39d71cc35bc45d407fa182d3c694abd422a268fb98 not found: ID does not exist" Nov 25 13:34:39 crc kubenswrapper[4675]: I1125 13:34:39.544194 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" path="/var/lib/kubelet/pods/76e2d0d3-7e67-4467-a723-e6b3bb7f8e12/volumes" Nov 25 13:34:42 crc kubenswrapper[4675]: I1125 13:34:42.551091 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-2l8n5_c147bbe0-eb8e-44ff-b4e2-271af218f1ff/nmstate-console-plugin/0.log" Nov 25 13:34:42 crc kubenswrapper[4675]: I1125 13:34:42.677759 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-xzw2f_83ed5cc3-8ce1-4765-b975-c7a543434c95/nmstate-handler/0.log" Nov 25 13:34:42 crc 
kubenswrapper[4675]: I1125 13:34:42.811742 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-gwgnx_48952822-4273-4558-b07e-ad6e9e80dbdf/nmstate-metrics/0.log" Nov 25 13:34:42 crc kubenswrapper[4675]: I1125 13:34:42.852679 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-gwgnx_48952822-4273-4558-b07e-ad6e9e80dbdf/kube-rbac-proxy/0.log" Nov 25 13:34:43 crc kubenswrapper[4675]: I1125 13:34:43.021758 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-tld8t_669a3ab6-17cd-4b0e-8498-bbf3bd4041f4/nmstate-operator/0.log" Nov 25 13:34:43 crc kubenswrapper[4675]: I1125 13:34:43.039779 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-z8hd2_fff162cc-a7a0-4cbb-930f-7867fbb1cf70/nmstate-webhook/0.log" Nov 25 13:35:00 crc kubenswrapper[4675]: I1125 13:35:00.756761 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dnrkj_c7735c55-9453-4052-8156-30e1155c73eb/kube-rbac-proxy/0.log" Nov 25 13:35:00 crc kubenswrapper[4675]: I1125 13:35:00.851404 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dnrkj_c7735c55-9453-4052-8156-30e1155c73eb/controller/0.log" Nov 25 13:35:00 crc kubenswrapper[4675]: I1125 13:35:00.983032 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.246333 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.284774 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.287606 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.341780 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.554424 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.612370 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.612718 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.689902 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.870095 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-frr-files/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: 
I1125 13:35:01.896344 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-metrics/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.912322 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/cp-reloader/0.log" Nov 25 13:35:01 crc kubenswrapper[4675]: I1125 13:35:01.917192 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/controller/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.108672 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/frr-metrics/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.282087 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/kube-rbac-proxy-frr/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.463534 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/kube-rbac-proxy/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.545038 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/reloader/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.799890 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-2vqjx_f2695e9e-a774-4ab3-823a-6ea088db6ae8/frr-k8s-webhook-server/0.log" Nov 25 13:35:02 crc kubenswrapper[4675]: I1125 13:35:02.994638 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5db469f446-85gxv_eb100a90-931c-4daa-8466-49a1ae50185b/manager/3.log" Nov 25 13:35:03 crc kubenswrapper[4675]: I1125 13:35:03.051936 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5db469f446-85gxv_eb100a90-931c-4daa-8466-49a1ae50185b/manager/2.log" Nov 25 13:35:03 crc kubenswrapper[4675]: I1125 13:35:03.304411 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5745948454-t8f8s_fa47f959-6c90-4cbf-b9a2-1d1e152414da/webhook-server/0.log" Nov 25 13:35:03 crc kubenswrapper[4675]: I1125 13:35:03.384468 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-45wff_afe1f56b-a6db-4458-bdb3-a5b6f88e30b0/frr/0.log" Nov 25 13:35:03 crc kubenswrapper[4675]: I1125 13:35:03.571126 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jwm85_c2d490ab-73e3-4ff8-a9e1-1359fa135b87/kube-rbac-proxy/0.log" Nov 25 13:35:03 crc kubenswrapper[4675]: I1125 13:35:03.913312 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jwm85_c2d490ab-73e3-4ff8-a9e1-1359fa135b87/speaker/0.log" Nov 25 13:35:18 crc kubenswrapper[4675]: I1125 13:35:18.828915 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:35:18 crc kubenswrapper[4675]: I1125 13:35:18.986990 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.039410 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.040502 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.182580 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/util/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.220631 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/extract/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.227674 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e5s58x_5307bc14-522d-4a40-a135-8ba280d2202f/pull/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.429114 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.599236 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.614497 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.628198 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.799507 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-content/0.log" Nov 25 13:35:19 crc kubenswrapper[4675]: I1125 13:35:19.804943 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/extract-utilities/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.144090 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.389093 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.400055 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-jhj2z_4d40a555-6ac6-4f2b-aab9-d9586a0607fc/registry-server/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.443568 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.471471 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.623346 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-utilities/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.652658 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/extract-content/0.log" Nov 25 13:35:20 crc kubenswrapper[4675]: I1125 13:35:20.890388 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.143454 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.185949 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.258159 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.417741 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qll4m_fe96f0fa-cfaf-4889-9219-3626cb45d0e0/registry-server/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.460658 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/util/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.518535 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/pull/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.589453 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c65bzmz_077791d3-2406-48df-96ea-e6c84fa68b89/extract/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.662828 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gft6s_b0a290f6-aa83-4c86-80ba-5f48e9a78c36/marketplace-operator/0.log" Nov 25 13:35:21 crc kubenswrapper[4675]: I1125 13:35:21.805128 4675 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.200126 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.241345 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.245277 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.418290 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-utilities/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.453315 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/extract-content/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.571940 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mbhkw_ed22e285-c4c2-403a-ace9-37402c049fae/registry-server/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.589839 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.796170 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.812268 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log" Nov 25 13:35:22 crc kubenswrapper[4675]: I1125 13:35:22.871848 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log" Nov 25 13:35:23 crc kubenswrapper[4675]: I1125 13:35:23.008787 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-content/0.log" Nov 25 13:35:23 crc kubenswrapper[4675]: I1125 13:35:23.026848 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/extract-utilities/0.log" Nov 25 13:35:23 crc kubenswrapper[4675]: I1125 13:35:23.272000 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vgcgs_ff144f02-0869-43a8-9371-690790fac643/registry-server/0.log" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.934322 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:35:28 crc kubenswrapper[4675]: E1125 13:35:28.935197 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="extract-utilities" Nov 25 
13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.935210 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="extract-utilities" Nov 25 13:35:28 crc kubenswrapper[4675]: E1125 13:35:28.935249 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="extract-content" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.935256 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="extract-content" Nov 25 13:35:28 crc kubenswrapper[4675]: E1125 13:35:28.935269 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="registry-server" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.935276 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="registry-server" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.935463 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="76e2d0d3-7e67-4467-a723-e6b3bb7f8e12" containerName="registry-server" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.937071 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:28 crc kubenswrapper[4675]: I1125 13:35:28.962218 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.046543 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-869gq\" (UniqueName: \"kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.046896 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.047152 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.149054 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.149189 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 
13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.149275 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-869gq\" (UniqueName: \"kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.149593 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.149884 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.166357 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-869gq\" (UniqueName: \"kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq\") pod \"redhat-operators-5fwpp\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.267846 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:29 crc kubenswrapper[4675]: I1125 13:35:29.791139 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:35:30 crc kubenswrapper[4675]: I1125 13:35:30.463881 4675 generic.go:334] "Generic (PLEG): container finished" podID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerID="fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040" exitCode=0 Nov 25 13:35:30 crc kubenswrapper[4675]: I1125 13:35:30.464159 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerDied","Data":"fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040"} Nov 25 13:35:30 crc kubenswrapper[4675]: I1125 13:35:30.464609 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerStarted","Data":"3bde3b33c67cc941a2a8885fff0fb6633095c39d205c4891b2eba97043938bfb"} Nov 25 13:35:30 crc kubenswrapper[4675]: I1125 13:35:30.466250 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 13:35:31 crc kubenswrapper[4675]: I1125 13:35:31.473603 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerStarted","Data":"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12"} Nov 25 13:35:35 crc kubenswrapper[4675]: I1125 13:35:35.509653 4675 generic.go:334] "Generic (PLEG): container finished" podID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerID="48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12" exitCode=0 Nov 25 
13:35:35 crc kubenswrapper[4675]: I1125 13:35:35.509717 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerDied","Data":"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12"} Nov 25 13:35:36 crc kubenswrapper[4675]: I1125 13:35:36.523691 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerStarted","Data":"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b"} Nov 25 13:35:39 crc kubenswrapper[4675]: I1125 13:35:39.269000 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:39 crc kubenswrapper[4675]: I1125 13:35:39.269683 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:35:40 crc kubenswrapper[4675]: I1125 13:35:40.322698 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5fwpp" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server" probeResult="failure" output=< Nov 25 13:35:40 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:35:40 crc kubenswrapper[4675]: > Nov 25 13:35:50 crc kubenswrapper[4675]: I1125 13:35:50.341038 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5fwpp" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server" probeResult="failure" output=< Nov 25 13:35:50 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:35:50 crc kubenswrapper[4675]: > Nov 25 13:36:00 crc kubenswrapper[4675]: I1125 13:36:00.321509 4675 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5fwpp" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server" probeResult="failure" output=< Nov 25 13:36:00 crc kubenswrapper[4675]: timeout: failed to connect service ":50051" within 1s Nov 25 13:36:00 crc kubenswrapper[4675]: > Nov 25 13:36:09 crc kubenswrapper[4675]: I1125 13:36:09.312007 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:36:09 crc kubenswrapper[4675]: I1125 13:36:09.335639 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5fwpp" podStartSLOduration=35.655362085 podStartE2EDuration="41.33561412s" podCreationTimestamp="2025-11-25 13:35:28 +0000 UTC" firstStartedPulling="2025-11-25 13:35:30.466016909 +0000 UTC m=+4075.637609250" lastFinishedPulling="2025-11-25 13:35:36.146268944 +0000 UTC m=+4081.317861285" observedRunningTime="2025-11-25 13:35:36.553387916 +0000 UTC m=+4081.724980257" watchObservedRunningTime="2025-11-25 13:36:09.33561412 +0000 UTC m=+4114.507206471" Nov 25 13:36:09 crc kubenswrapper[4675]: I1125 13:36:09.374861 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:36:09 crc kubenswrapper[4675]: I1125 13:36:09.559287 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:36:10 crc kubenswrapper[4675]: I1125 13:36:10.867571 4675 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-marketplace/redhat-operators-5fwpp" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server" containerID="cri-o://8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b" gracePeriod=2 Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.347457 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.436982 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities\") pod \"994ee436-eaf8-4660-bd9c-c0104dfb928d\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.437161 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content\") pod \"994ee436-eaf8-4660-bd9c-c0104dfb928d\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.437219 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-869gq\" (UniqueName: \"kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq\") pod \"994ee436-eaf8-4660-bd9c-c0104dfb928d\" (UID: \"994ee436-eaf8-4660-bd9c-c0104dfb928d\") " Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.438745 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities" (OuterVolumeSpecName: "utilities") pod "994ee436-eaf8-4660-bd9c-c0104dfb928d" (UID: "994ee436-eaf8-4660-bd9c-c0104dfb928d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.444417 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq" (OuterVolumeSpecName: "kube-api-access-869gq") pod "994ee436-eaf8-4660-bd9c-c0104dfb928d" (UID: "994ee436-eaf8-4660-bd9c-c0104dfb928d"). InnerVolumeSpecName "kube-api-access-869gq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.524339 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "994ee436-eaf8-4660-bd9c-c0104dfb928d" (UID: "994ee436-eaf8-4660-bd9c-c0104dfb928d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.539648 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.539680 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/994ee436-eaf8-4660-bd9c-c0104dfb928d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.539690 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-869gq\" (UniqueName: \"kubernetes.io/projected/994ee436-eaf8-4660-bd9c-c0104dfb928d-kube-api-access-869gq\") on node \"crc\" DevicePath \"\"" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.876974 4675 generic.go:334] "Generic (PLEG): container finished" podID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerID="8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b" exitCode=0 Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.877011 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerDied","Data":"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b"} Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.877038 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fwpp" event={"ID":"994ee436-eaf8-4660-bd9c-c0104dfb928d","Type":"ContainerDied","Data":"3bde3b33c67cc941a2a8885fff0fb6633095c39d205c4891b2eba97043938bfb"} Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.877055 4675 scope.go:117] "RemoveContainer" containerID="8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.877273 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5fwpp" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.912576 4675 scope.go:117] "RemoveContainer" containerID="48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12" Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.914304 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.936452 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5fwpp"] Nov 25 13:36:11 crc kubenswrapper[4675]: I1125 13:36:11.997992 4675 scope.go:117] "RemoveContainer" containerID="fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.077933 4675 scope.go:117] "RemoveContainer" containerID="8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b" Nov 25 13:36:12 crc kubenswrapper[4675]: E1125 13:36:12.079230 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b\": container with ID starting with 8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b not found: ID does not exist" containerID="8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.079260 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b"} err="failed to get container status \"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b\": rpc error: code = NotFound desc = could not find container \"8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b\": container with ID starting with 8db43abc3773e961819289a13d7532a68d9402385033b0cc07a762a676567c6b not found: ID does not exist" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.079285 4675 scope.go:117] "RemoveContainer" containerID="48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12" Nov 25 13:36:12 crc kubenswrapper[4675]: E1125 13:36:12.082088 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12\": container with ID starting with 48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12 not found: ID does not exist" containerID="48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.082118 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12"} err="failed to get container status \"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12\": rpc error: code = NotFound desc = could not find container \"48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12\": container with ID starting with 48f5f5bac67b77368017e596c3aee7165df45357e2c9484609df3b3b22824c12 not found: ID does not exist" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.082139 4675 scope.go:117] "RemoveContainer" containerID="fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040" Nov 25 13:36:12 crc kubenswrapper[4675]: E1125 13:36:12.086114 4675 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040\": container with ID starting with fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040 not found: ID does not exist" containerID="fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040" Nov 25 13:36:12 crc kubenswrapper[4675]: I1125 13:36:12.086166 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040"} err="failed to get container status \"fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040\": rpc error: code = NotFound desc = could not find container \"fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040\": container with ID starting with fb0e4cb447c9c96b8937c22d7f198bde7bd9927d46a93a99ef32bb2ae9ae0040 not found: ID does not exist" Nov 25 13:36:13 crc kubenswrapper[4675]: I1125 13:36:13.544796 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" path="/var/lib/kubelet/pods/994ee436-eaf8-4660-bd9c-c0104dfb928d/volumes" Nov 25 13:36:13 crc kubenswrapper[4675]: I1125 13:36:13.661851 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:36:13 crc kubenswrapper[4675]: I1125 13:36:13.661913 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:36:43 crc kubenswrapper[4675]: I1125 13:36:43.661972 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:36:43 crc kubenswrapper[4675]: I1125 13:36:43.662496 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:37:13 crc kubenswrapper[4675]: I1125 13:37:13.662212 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:37:13 crc kubenswrapper[4675]: I1125 13:37:13.662916 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:37:13 crc kubenswrapper[4675]: I1125 13:37:13.663000 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:37:13 crc kubenswrapper[4675]: I1125 13:37:13.664304 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:37:13 crc kubenswrapper[4675]: I1125 13:37:13.664467 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65" gracePeriod=600 Nov 25 13:37:14 crc kubenswrapper[4675]: I1125 13:37:14.491800 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65" exitCode=0 Nov 25 13:37:14 crc kubenswrapper[4675]: I1125 13:37:14.491872 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65"} Nov 25 13:37:14 crc kubenswrapper[4675]: I1125 13:37:14.492212 4675 scope.go:117] "RemoveContainer" containerID="5698493281c8bc28047c9ded3827517f0c93e64e0df3f708dc8680a29e655e6d" Nov 25 13:37:15 crc kubenswrapper[4675]: I1125 13:37:15.504006 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerStarted","Data":"dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8"} Nov 25 13:37:23 crc kubenswrapper[4675]: I1125 13:37:23.599309 4675 generic.go:334] "Generic (PLEG): container finished" podID="06df0284-68d4-4144-a705-c62f63ef2a32" containerID="1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87" exitCode=0 Nov 25 13:37:23 crc kubenswrapper[4675]: I1125 13:37:23.599388 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" event={"ID":"06df0284-68d4-4144-a705-c62f63ef2a32","Type":"ContainerDied","Data":"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87"} Nov 25 13:37:23 crc kubenswrapper[4675]: I1125 13:37:23.600377 4675 scope.go:117] "RemoveContainer" containerID="1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87" Nov 25 13:37:24 crc kubenswrapper[4675]: I1125 13:37:24.073872 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzvbd_must-gather-rwf9k_06df0284-68d4-4144-a705-c62f63ef2a32/gather/0.log" Nov 25 13:37:34 crc kubenswrapper[4675]: I1125 13:37:34.971509 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bzvbd/must-gather-rwf9k"] Nov 25 13:37:34 crc kubenswrapper[4675]: I1125 13:37:34.972357 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="copy" containerID="cri-o://fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f" gracePeriod=2 Nov 25 13:37:34 crc 
kubenswrapper[4675]: I1125 13:37:34.980544 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bzvbd/must-gather-rwf9k"] Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.563482 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzvbd_must-gather-rwf9k_06df0284-68d4-4144-a705-c62f63ef2a32/copy/0.log" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.565002 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.639998 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smpv5\" (UniqueName: \"kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5\") pod \"06df0284-68d4-4144-a705-c62f63ef2a32\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.640157 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output\") pod \"06df0284-68d4-4144-a705-c62f63ef2a32\" (UID: \"06df0284-68d4-4144-a705-c62f63ef2a32\") " Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.648123 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5" (OuterVolumeSpecName: "kube-api-access-smpv5") pod "06df0284-68d4-4144-a705-c62f63ef2a32" (UID: "06df0284-68d4-4144-a705-c62f63ef2a32"). InnerVolumeSpecName "kube-api-access-smpv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.704586 4675 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bzvbd_must-gather-rwf9k_06df0284-68d4-4144-a705-c62f63ef2a32/copy/0.log" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.705661 4675 generic.go:334] "Generic (PLEG): container finished" podID="06df0284-68d4-4144-a705-c62f63ef2a32" containerID="fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f" exitCode=143 Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.705715 4675 scope.go:117] "RemoveContainer" containerID="fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.705729 4675 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bzvbd/must-gather-rwf9k" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.742672 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smpv5\" (UniqueName: \"kubernetes.io/projected/06df0284-68d4-4144-a705-c62f63ef2a32-kube-api-access-smpv5\") on node \"crc\" DevicePath \"\"" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.748619 4675 scope.go:117] "RemoveContainer" containerID="1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.818121 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "06df0284-68d4-4144-a705-c62f63ef2a32" (UID: "06df0284-68d4-4144-a705-c62f63ef2a32"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.847168 4675 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/06df0284-68d4-4144-a705-c62f63ef2a32-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.866689 4675 scope.go:117] "RemoveContainer" containerID="fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f" Nov 25 13:37:35 crc kubenswrapper[4675]: E1125 13:37:35.868227 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f\": container with ID starting with fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f not found: ID does not exist" containerID="fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.868264 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f"} err="failed to get container status \"fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f\": rpc error: code = NotFound desc = could not find container \"fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f\": container with ID starting with fc76c00cd6d3116ffa3db9aafc64c7bbb48bb9e7b14c6894e7f9cee23153af3f not found: ID does not exist" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.868285 4675 scope.go:117] "RemoveContainer" containerID="1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87" Nov 25 13:37:35 crc kubenswrapper[4675]: E1125 13:37:35.876956 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87\": container with ID starting with 1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87 not found: ID does not exist" containerID="1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87" Nov 25 13:37:35 crc kubenswrapper[4675]: I1125 13:37:35.876997 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87"} err="failed to get container status \"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87\": rpc error: code = NotFound desc = could not find container \"1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87\": container with ID starting with 1c6321502c2166e3833219568d64d1916323882eddd77b7001fdc76936990d87 not found: ID does not exist" Nov 25 13:37:37 crc kubenswrapper[4675]: I1125 13:37:37.557631 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" path="/var/lib/kubelet/pods/06df0284-68d4-4144-a705-c62f63ef2a32/volumes" Nov 25 13:39:43 crc kubenswrapper[4675]: I1125 13:39:43.662781 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:39:43 crc kubenswrapper[4675]: I1125 13:39:43.663280 4675 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:40:13 crc kubenswrapper[4675]: I1125 13:40:13.662051 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:40:13 crc kubenswrapper[4675]: I1125 13:40:13.662672 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:40:43 crc kubenswrapper[4675]: I1125 13:40:43.662470 4675 patch_prober.go:28] interesting pod/machine-config-daemon-n7t8r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 13:40:43 crc kubenswrapper[4675]: I1125 13:40:43.662986 4675 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 13:40:43 crc kubenswrapper[4675]: I1125 13:40:43.663058 4675 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" Nov 25 13:40:43 crc kubenswrapper[4675]: I1125 13:40:43.663784 4675 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8"} pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 13:40:43 crc kubenswrapper[4675]: I1125 13:40:43.663871 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerName="machine-config-daemon" containerID="cri-o://dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8" gracePeriod=600 Nov 25 13:40:43 crc kubenswrapper[4675]: E1125 13:40:43.790114 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:40:44 crc kubenswrapper[4675]: I1125 13:40:44.217327 4675 generic.go:334] "Generic (PLEG): container finished" podID="e2e07bd2-ea2f-48da-9358-49fed47fa922" containerID="dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8" exitCode=0 Nov 
Nov 25 13:40:44 crc kubenswrapper[4675]: I1125 13:40:44.217413 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" event={"ID":"e2e07bd2-ea2f-48da-9358-49fed47fa922","Type":"ContainerDied","Data":"dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8"}
Nov 25 13:40:44 crc kubenswrapper[4675]: I1125 13:40:44.217600 4675 scope.go:117] "RemoveContainer" containerID="20662e96e925ea53ed9c61563d7d26afa7fc94d988ed906d74c50474d5b3fc65"
Nov 25 13:40:44 crc kubenswrapper[4675]: I1125 13:40:44.219410 4675 scope.go:117] "RemoveContainer" containerID="dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8"
Nov 25 13:40:44 crc kubenswrapper[4675]: E1125 13:40:44.219809 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.820729 4675 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"]
Nov 25 13:40:56 crc kubenswrapper[4675]: E1125 13:40:56.821834 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="gather"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.821855 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="gather"
Nov 25 13:40:56 crc kubenswrapper[4675]: E1125 13:40:56.821873 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="copy"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.821882 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="copy"
Nov 25 13:40:56 crc kubenswrapper[4675]: E1125 13:40:56.821894 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="extract-content"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.821902 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="extract-content"
Nov 25 13:40:56 crc kubenswrapper[4675]: E1125 13:40:56.821926 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="extract-utilities"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.821933 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="extract-utilities"
Nov 25 13:40:56 crc kubenswrapper[4675]: E1125 13:40:56.821943 4675 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.821950 4675 state_mem.go:107] "Deleted CPUSet assignment" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.822194 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="copy"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.822220 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="994ee436-eaf8-4660-bd9c-c0104dfb928d" containerName="registry-server"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.822235 4675 memory_manager.go:354] "RemoveStaleState removing state" podUID="06df0284-68d4-4144-a705-c62f63ef2a32" containerName="gather"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.824050 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:56 crc kubenswrapper[4675]: I1125 13:40:56.841463 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"]
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.012929 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.013004 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.013045 4675 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxzjd\" (UniqueName: \"kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.114770 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.114832 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.114852 4675 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxzjd\" (UniqueName: \"kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.115302 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.115388 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.133400 4675 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxzjd\" (UniqueName: \"kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd\") pod \"redhat-marketplace-2mqb2\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.172882 4675 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2mqb2"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.534152 4675 scope.go:117] "RemoveContainer" containerID="dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8"
Nov 25 13:40:57 crc kubenswrapper[4675]: E1125 13:40:57.534776 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922"
Nov 25 13:40:57 crc kubenswrapper[4675]: I1125 13:40:57.765221 4675 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"]
Nov 25 13:40:57 crc kubenswrapper[4675]: W1125 13:40:57.765502 4675 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1284aa9d_35b4_4712_92e5_d75edda38296.slice/crio-cb2e1149ff92ebc1973dd23d590e2cd4d4dc2bbac20c590e3dc6e262a29c4fe6 WatchSource:0}: Error finding container cb2e1149ff92ebc1973dd23d590e2cd4d4dc2bbac20c590e3dc6e262a29c4fe6: Status 404 returned error can't find the container with id cb2e1149ff92ebc1973dd23d590e2cd4d4dc2bbac20c590e3dc6e262a29c4fe6
Nov 25 13:40:58 crc kubenswrapper[4675]: I1125 13:40:58.367531 4675 generic.go:334] "Generic (PLEG): container finished" podID="1284aa9d-35b4-4712-92e5-d75edda38296" containerID="4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f" exitCode=0
Nov 25 13:40:58 crc kubenswrapper[4675]: I1125 13:40:58.367641 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerDied","Data":"4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f"}
Nov 25 13:40:58 crc kubenswrapper[4675]: I1125 13:40:58.368048 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerStarted","Data":"cb2e1149ff92ebc1973dd23d590e2cd4d4dc2bbac20c590e3dc6e262a29c4fe6"}
Nov 25 13:40:58 crc kubenswrapper[4675]: I1125 13:40:58.371788 4675 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 13:41:00 crc kubenswrapper[4675]: I1125 13:41:00.398552 4675 generic.go:334] "Generic (PLEG): container finished" podID="1284aa9d-35b4-4712-92e5-d75edda38296" containerID="1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4" exitCode=0
containerID="1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4" exitCode=0 Nov 25 13:41:00 crc kubenswrapper[4675]: I1125 13:41:00.398606 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerDied","Data":"1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4"} Nov 25 13:41:01 crc kubenswrapper[4675]: I1125 13:41:01.413998 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerStarted","Data":"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"} Nov 25 13:41:01 crc kubenswrapper[4675]: I1125 13:41:01.432801 4675 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2mqb2" podStartSLOduration=2.977281548 podStartE2EDuration="5.432782278s" podCreationTimestamp="2025-11-25 13:40:56 +0000 UTC" firstStartedPulling="2025-11-25 13:40:58.37124928 +0000 UTC m=+4403.542841661" lastFinishedPulling="2025-11-25 13:41:00.82675005 +0000 UTC m=+4405.998342391" observedRunningTime="2025-11-25 13:41:01.430451174 +0000 UTC m=+4406.602043525" watchObservedRunningTime="2025-11-25 13:41:01.432782278 +0000 UTC m=+4406.604374619" Nov 25 13:41:07 crc kubenswrapper[4675]: I1125 13:41:07.173249 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2mqb2" Nov 25 13:41:07 crc kubenswrapper[4675]: I1125 13:41:07.173851 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2mqb2" Nov 25 13:41:07 crc kubenswrapper[4675]: I1125 13:41:07.225121 4675 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2mqb2" Nov 25 13:41:07 crc kubenswrapper[4675]: I1125 13:41:07.504791 4675 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2mqb2" Nov 25 13:41:07 crc kubenswrapper[4675]: I1125 13:41:07.556249 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"] Nov 25 13:41:09 crc kubenswrapper[4675]: I1125 13:41:09.477105 4675 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2mqb2" podUID="1284aa9d-35b4-4712-92e5-d75edda38296" containerName="registry-server" containerID="cri-o://e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669" gracePeriod=2 Nov 25 13:41:09 crc kubenswrapper[4675]: I1125 13:41:09.533186 4675 scope.go:117] "RemoveContainer" containerID="dbfc2d715c5c9bd330f17a37732d8162bfa41a01c22bf2384535b6ade27b68a8" Nov 25 13:41:09 crc kubenswrapper[4675]: E1125 13:41:09.533435 4675 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n7t8r_openshift-machine-config-operator(e2e07bd2-ea2f-48da-9358-49fed47fa922)\"" pod="openshift-machine-config-operator/machine-config-daemon-n7t8r" podUID="e2e07bd2-ea2f-48da-9358-49fed47fa922" Nov 25 13:41:09 crc kubenswrapper[4675]: I1125 13:41:09.960970 4675 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2mqb2" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.066746 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxzjd\" (UniqueName: \"kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd\") pod \"1284aa9d-35b4-4712-92e5-d75edda38296\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.066856 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content\") pod \"1284aa9d-35b4-4712-92e5-d75edda38296\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.066988 4675 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities\") pod \"1284aa9d-35b4-4712-92e5-d75edda38296\" (UID: \"1284aa9d-35b4-4712-92e5-d75edda38296\") " Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.068601 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities" (OuterVolumeSpecName: "utilities") pod "1284aa9d-35b4-4712-92e5-d75edda38296" (UID: "1284aa9d-35b4-4712-92e5-d75edda38296"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.083580 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd" (OuterVolumeSpecName: "kube-api-access-bxzjd") pod "1284aa9d-35b4-4712-92e5-d75edda38296" (UID: "1284aa9d-35b4-4712-92e5-d75edda38296"). InnerVolumeSpecName "kube-api-access-bxzjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.125223 4675 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1284aa9d-35b4-4712-92e5-d75edda38296" (UID: "1284aa9d-35b4-4712-92e5-d75edda38296"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.169020 4675 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxzjd\" (UniqueName: \"kubernetes.io/projected/1284aa9d-35b4-4712-92e5-d75edda38296-kube-api-access-bxzjd\") on node \"crc\" DevicePath \"\"" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.169058 4675 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.169071 4675 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284aa9d-35b4-4712-92e5-d75edda38296-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.488417 4675 generic.go:334] "Generic (PLEG): container finished" podID="1284aa9d-35b4-4712-92e5-d75edda38296" containerID="e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669" exitCode=0 Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.488456 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerDied","Data":"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"} Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.488480 4675 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2mqb2" event={"ID":"1284aa9d-35b4-4712-92e5-d75edda38296","Type":"ContainerDied","Data":"cb2e1149ff92ebc1973dd23d590e2cd4d4dc2bbac20c590e3dc6e262a29c4fe6"} Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.488481 4675 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.488495 4675 scope.go:117] "RemoveContainer" containerID="e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.522408 4675 scope.go:117] "RemoveContainer" containerID="1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.528695 4675 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"]
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.538723 4675 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2mqb2"]
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.551848 4675 scope.go:117] "RemoveContainer" containerID="4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.591348 4675 scope.go:117] "RemoveContainer" containerID="e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"
Nov 25 13:41:10 crc kubenswrapper[4675]: E1125 13:41:10.591937 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669\": container with ID starting with e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669 not found: ID does not exist" containerID="e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.592005 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669"} err="failed to get container status \"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669\": rpc error: code = NotFound desc = could not find container \"e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669\": container with ID starting with e009f5729472e18f160cf6b5c7fb0be0c978a624e3cf2425b78e8b5b5a6b6669 not found: ID does not exist"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.592037 4675 scope.go:117] "RemoveContainer" containerID="1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4"
Nov 25 13:41:10 crc kubenswrapper[4675]: E1125 13:41:10.592346 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4\": container with ID starting with 1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4 not found: ID does not exist" containerID="1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.592375 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4"} err="failed to get container status \"1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4\": rpc error: code = NotFound desc = could not find container \"1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4\": container with ID starting with 1ff600a3212c4a6e5bbc3e792523b3b3b9ffa1feb4850671791a4f69534732a4 not found: ID does not exist"
Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.592393 4675 scope.go:117] "RemoveContainer" containerID="4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f"
containerID="4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f" Nov 25 13:41:10 crc kubenswrapper[4675]: E1125 13:41:10.592664 4675 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f\": container with ID starting with 4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f not found: ID does not exist" containerID="4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f" Nov 25 13:41:10 crc kubenswrapper[4675]: I1125 13:41:10.592709 4675 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f"} err="failed to get container status \"4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f\": rpc error: code = NotFound desc = could not find container \"4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f\": container with ID starting with 4fc823efd648dbefe4afe847b84bd69cfc6a049e26e6751d8da7be2c8bce684f not found: ID does not exist" Nov 25 13:41:11 crc kubenswrapper[4675]: I1125 13:41:11.544583 4675 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1284aa9d-35b4-4712-92e5-d75edda38296" path="/var/lib/kubelet/pods/1284aa9d-35b4-4712-92e5-d75edda38296/volumes" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111331002024430 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111331002017345 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111317667016515 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111317667015465 5ustar corecore